vllm-cpu-amxbf16 0.11.2.post2 (cp310-cp310-manylinux_2_17_x86_64.whl)
This diff lists the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
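Each entry below follows the pattern `<path> +<added> -<removed>`; every file in this release is new relative to the comparison base, so the removed count is always 0. As a rough way to total the added lines across entries, here is a minimal sketch (assuming the listing has been saved locally to a plain-text file, hypothetically named `diff_listing.txt`):

```python
import re

# Matches manifest entries of the form "- vllm/__init__.py +225 -0"
ENTRY = re.compile(r"^- (?P<path>\S+) \+(?P<added>\d+) -(?P<removed>\d+)$")

def summarize(listing_path: str) -> None:
    total_added = 0
    files = 0
    with open(listing_path, encoding="utf-8") as fh:
        for line in fh:
            m = ENTRY.match(line.strip())
            if not m:
                continue  # skip the title and disclaimer lines
            files += 1
            total_added += int(m.group("added"))
    print(f"{files} files, {total_added} lines added")

if __name__ == "__main__":
    # "diff_listing.txt" is a hypothetical local copy of this listing
    summarize("diff_listing.txt")
```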
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +225 -0
- vllm/_aiter_ops.py +983 -0
- vllm/_bc_linter.py +54 -0
- vllm/_custom_ops.py +2863 -0
- vllm/_ipex_ops.py +457 -0
- vllm/_version.py +34 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +43 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +59 -0
- vllm/assets/video.py +149 -0
- vllm/attention/__init__.py +18 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +391 -0
- vllm/attention/backends/registry.py +195 -0
- vllm/attention/backends/utils.py +33 -0
- vllm/attention/layer.py +1052 -0
- vllm/attention/layers/__init__.py +0 -0
- vllm/attention/layers/chunked_local_attention.py +121 -0
- vllm/attention/layers/cross_attention.py +178 -0
- vllm/attention/layers/encoder_only_attention.py +103 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
- vllm/attention/ops/common.py +414 -0
- vllm/attention/ops/flashmla.py +251 -0
- vllm/attention/ops/merge_attn_states.py +47 -0
- vllm/attention/ops/paged_attn.py +262 -0
- vllm/attention/ops/pallas_kv_cache_update.py +130 -0
- vllm/attention/ops/prefix_prefill.py +814 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +123 -0
- vllm/attention/ops/triton_decode_attention.py +712 -0
- vllm/attention/ops/triton_merge_attn_states.py +105 -0
- vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
- vllm/attention/ops/triton_unified_attention.py +941 -0
- vllm/attention/ops/vit_attn_wrappers.py +178 -0
- vllm/attention/selector.py +231 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/fa_utils.py +109 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/attention/utils/kv_transfer_utils.py +60 -0
- vllm/beam_search.py +88 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +3222 -0
- vllm/benchmarks/latency.py +172 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +777 -0
- vllm/benchmarks/lib/ready_checker.py +72 -0
- vllm/benchmarks/lib/utils.py +79 -0
- vllm/benchmarks/serve.py +1531 -0
- vllm/benchmarks/sweep/__init__.py +0 -0
- vllm/benchmarks/sweep/cli.py +38 -0
- vllm/benchmarks/sweep/param_sweep.py +91 -0
- vllm/benchmarks/sweep/plot.py +580 -0
- vllm/benchmarks/sweep/serve.py +416 -0
- vllm/benchmarks/sweep/serve_sla.py +492 -0
- vllm/benchmarks/sweep/server.py +114 -0
- vllm/benchmarks/sweep/sla_sweep.py +132 -0
- vllm/benchmarks/sweep/utils.py +4 -0
- vllm/benchmarks/throughput.py +799 -0
- vllm/collect_env.py +857 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +209 -0
- vllm/compilation/backends.py +759 -0
- vllm/compilation/base_static_graph.py +57 -0
- vllm/compilation/caching.py +178 -0
- vllm/compilation/collective_fusion.py +1234 -0
- vllm/compilation/compiler_interface.py +639 -0
- vllm/compilation/counter.py +48 -0
- vllm/compilation/cuda_graph.py +208 -0
- vllm/compilation/decorators.py +571 -0
- vllm/compilation/fix_functionalization.py +253 -0
- vllm/compilation/fusion.py +374 -0
- vllm/compilation/fusion_attn.py +359 -0
- vllm/compilation/fx_utils.py +91 -0
- vllm/compilation/inductor_pass.py +133 -0
- vllm/compilation/matcher_utils.py +317 -0
- vllm/compilation/monitor.py +62 -0
- vllm/compilation/noop_elimination.py +134 -0
- vllm/compilation/partition_rules.py +72 -0
- vllm/compilation/pass_manager.py +135 -0
- vllm/compilation/piecewise_backend.py +121 -0
- vllm/compilation/post_cleanup.py +21 -0
- vllm/compilation/qk_norm_rope_fusion.py +238 -0
- vllm/compilation/sequence_parallelism.py +363 -0
- vllm/compilation/torch25_custom_graph_pass.py +44 -0
- vllm/compilation/vllm_inductor_pass.py +173 -0
- vllm/compilation/wrapper.py +238 -0
- vllm/config/__init__.py +102 -0
- vllm/config/cache.py +207 -0
- vllm/config/compilation.py +975 -0
- vllm/config/device.py +75 -0
- vllm/config/ec_transfer.py +110 -0
- vllm/config/kv_events.py +56 -0
- vllm/config/kv_transfer.py +114 -0
- vllm/config/load.py +124 -0
- vllm/config/lora.py +112 -0
- vllm/config/model.py +2162 -0
- vllm/config/multimodal.py +248 -0
- vllm/config/observability.py +123 -0
- vllm/config/parallel.py +655 -0
- vllm/config/pooler.py +122 -0
- vllm/config/scheduler.py +298 -0
- vllm/config/speculative.py +654 -0
- vllm/config/speech_to_text.py +38 -0
- vllm/config/structured_outputs.py +92 -0
- vllm/config/utils.py +178 -0
- vllm/config/vllm.py +1166 -0
- vllm/connections.py +189 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +327 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +43 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +490 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
- vllm/distributed/device_communicators/base_device_communicator.py +297 -0
- vllm/distributed/device_communicators/cpu_communicator.py +209 -0
- vllm/distributed/device_communicators/cuda_communicator.py +340 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
- vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
- vllm/distributed/device_communicators/pynccl.py +386 -0
- vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
- vllm/distributed/device_communicators/ray_communicator.py +259 -0
- vllm/distributed/device_communicators/shm_broadcast.py +733 -0
- vllm/distributed/device_communicators/shm_object_storage.py +660 -0
- vllm/distributed/device_communicators/symm_mem.py +156 -0
- vllm/distributed/device_communicators/tpu_communicator.py +107 -0
- vllm/distributed/device_communicators/xpu_communicator.py +95 -0
- vllm/distributed/ec_transfer/__init__.py +14 -0
- vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
- vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
- vllm/distributed/ec_transfer/ec_connector/factory.py +88 -0
- vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
- vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/eplb_state.py +837 -0
- vllm/distributed/eplb/rebalance_algo.py +260 -0
- vllm/distributed/eplb/rebalance_execute.py +431 -0
- vllm/distributed/kv_events.py +371 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +20 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +546 -0
- vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +379 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +867 -0
- vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2440 -0
- vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +504 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
- vllm/distributed/parallel_state.py +1759 -0
- vllm/distributed/tpu_distributed_utils.py +188 -0
- vllm/distributed/utils.py +543 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +2144 -0
- vllm/engine/async_llm_engine.py +6 -0
- vllm/engine/llm_engine.py +6 -0
- vllm/engine/protocol.py +170 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/anthropic/__init__.py +0 -0
- vllm/entrypoints/anthropic/protocol.py +162 -0
- vllm/entrypoints/anthropic/serving_messages.py +460 -0
- vllm/entrypoints/api_server.py +184 -0
- vllm/entrypoints/chat_utils.py +1690 -0
- vllm/entrypoints/cli/__init__.py +13 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +56 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/sweep.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +38 -0
- vllm/entrypoints/cli/main.py +79 -0
- vllm/entrypoints/cli/openai.py +256 -0
- vllm/entrypoints/cli/run_batch.py +68 -0
- vllm/entrypoints/cli/serve.py +249 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +10 -0
- vllm/entrypoints/context.py +572 -0
- vllm/entrypoints/dynamic_lora.py +57 -0
- vllm/entrypoints/harmony_utils.py +535 -0
- vllm/entrypoints/launcher.py +175 -0
- vllm/entrypoints/llm.py +1768 -0
- vllm/entrypoints/logger.py +84 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +2096 -0
- vllm/entrypoints/openai/cli_args.py +302 -0
- vllm/entrypoints/openai/orca_metrics.py +120 -0
- vllm/entrypoints/openai/protocol.py +3299 -0
- vllm/entrypoints/openai/run_batch.py +547 -0
- vllm/entrypoints/openai/serving_chat.py +1772 -0
- vllm/entrypoints/openai/serving_classification.py +235 -0
- vllm/entrypoints/openai/serving_completion.py +715 -0
- vllm/entrypoints/openai/serving_embedding.py +695 -0
- vllm/entrypoints/openai/serving_engine.py +1433 -0
- vllm/entrypoints/openai/serving_models.py +304 -0
- vllm/entrypoints/openai/serving_pooling.py +346 -0
- vllm/entrypoints/openai/serving_responses.py +2021 -0
- vllm/entrypoints/openai/serving_score.py +503 -0
- vllm/entrypoints/openai/serving_tokenization.py +203 -0
- vllm/entrypoints/openai/serving_tokens.py +269 -0
- vllm/entrypoints/openai/serving_transcription.py +148 -0
- vllm/entrypoints/openai/speech_to_text.py +405 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
- vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
- vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +323 -0
- vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +290 -0
- vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
- vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
- vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
- vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
- vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
- vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
- vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
- vllm/entrypoints/renderer.py +409 -0
- vllm/entrypoints/responses_utils.py +77 -0
- vllm/entrypoints/sagemaker/__init__.py +4 -0
- vllm/entrypoints/sagemaker/routes.py +72 -0
- vllm/entrypoints/score_utils.py +242 -0
- vllm/entrypoints/ssl.py +78 -0
- vllm/entrypoints/tool.py +143 -0
- vllm/entrypoints/tool_server.py +209 -0
- vllm/entrypoints/utils.py +319 -0
- vllm/env_override.py +378 -0
- vllm/envs.py +1659 -0
- vllm/forward_context.py +356 -0
- vllm/inputs/__init__.py +44 -0
- vllm/inputs/data.py +359 -0
- vllm/inputs/parse.py +137 -0
- vllm/inputs/preprocess.py +727 -0
- vllm/logger.py +267 -0
- vllm/logging_utils/__init__.py +10 -0
- vllm/logging_utils/dump_input.py +83 -0
- vllm/logging_utils/formatter.py +77 -0
- vllm/logging_utils/log_time.py +34 -0
- vllm/logits_process.py +121 -0
- vllm/logprobs.py +208 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +41 -0
- vllm/lora/layers/base.py +67 -0
- vllm/lora/layers/base_linear.py +164 -0
- vllm/lora/layers/column_parallel_linear.py +578 -0
- vllm/lora/layers/fused_moe.py +472 -0
- vllm/lora/layers/logits_processor.py +252 -0
- vllm/lora/layers/replicated_linear.py +70 -0
- vllm/lora/layers/row_parallel_linear.py +181 -0
- vllm/lora/layers/utils.py +65 -0
- vllm/lora/layers/vocal_parallel_embedding.py +166 -0
- vllm/lora/lora_weights.py +198 -0
- vllm/lora/models.py +890 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +6 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
- vllm/lora/ops/torch_ops/__init__.py +20 -0
- vllm/lora/ops/torch_ops/lora_ops.py +128 -0
- vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
- vllm/lora/ops/triton_ops/__init__.py +21 -0
- vllm/lora/ops/triton_ops/fused_moe_lora_op.py +641 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
- vllm/lora/ops/triton_ops/utils.py +295 -0
- vllm/lora/ops/xla_ops/__init__.py +6 -0
- vllm/lora/ops/xla_ops/lora_ops.py +141 -0
- vllm/lora/peft_helper.py +128 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +492 -0
- vllm/lora/punica_wrapper/punica_cpu.py +351 -0
- vllm/lora/punica_wrapper/punica_gpu.py +411 -0
- vllm/lora/punica_wrapper/punica_selector.py +21 -0
- vllm/lora/punica_wrapper/punica_tpu.py +359 -0
- vllm/lora/punica_wrapper/punica_xpu.py +279 -0
- vllm/lora/punica_wrapper/utils.py +150 -0
- vllm/lora/request.py +100 -0
- vllm/lora/resolver.py +88 -0
- vllm/lora/utils.py +293 -0
- vllm/lora/worker_manager.py +279 -0
- vllm/model_executor/__init__.py +11 -0
- vllm/model_executor/custom_op.py +194 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +569 -0
- vllm/model_executor/layers/attention_layer_base.py +35 -0
- vllm/model_executor/layers/batch_invariant.py +854 -0
- vllm/model_executor/layers/conv.py +236 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +240 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
- vllm/model_executor/layers/fla/ops/index.py +41 -0
- vllm/model_executor/layers/fla/ops/kda.py +1351 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
- vllm/model_executor/layers/fla/ops/op.py +60 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
- vllm/model_executor/layers/fla/ops/utils.py +194 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
- vllm/model_executor/layers/fused_moe/__init__.py +106 -0
- vllm/model_executor/layers/fused_moe/all2all_utils.py +160 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
- vllm/model_executor/layers/fused_moe/config.py +916 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +354 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +367 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +792 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +2175 -0
- vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +112 -0
- vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +164 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +316 -0
- vllm/model_executor/layers/fused_moe/layer.py +1944 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +1222 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +77 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
- vllm/model_executor/layers/fused_moe/shared_fused_moe.py +97 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
- vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +578 -0
- vllm/model_executor/layers/fused_moe/utils.py +332 -0
- vllm/model_executor/layers/kda.py +448 -0
- vllm/model_executor/layers/layernorm.py +442 -0
- vllm/model_executor/layers/lightning_attn.py +729 -0
- vllm/model_executor/layers/linear.py +1424 -0
- vllm/model_executor/layers/logits_processor.py +106 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +71 -0
- vllm/model_executor/layers/mamba/linear_attn.py +402 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +535 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +928 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
- vllm/model_executor/layers/mamba/short_conv.py +264 -0
- vllm/model_executor/layers/mla.py +168 -0
- vllm/model_executor/layers/pooler.py +817 -0
- vllm/model_executor/layers/quantization/__init__.py +174 -0
- vllm/model_executor/layers/quantization/auto_round.py +454 -0
- vllm/model_executor/layers/quantization/awq.py +277 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +659 -0
- vllm/model_executor/layers/quantization/awq_triton.py +337 -0
- vllm/model_executor/layers/quantization/base_config.py +170 -0
- vllm/model_executor/layers/quantization/bitblas.py +502 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +658 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +914 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2284 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +219 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
- vllm/model_executor/layers/quantization/experts_int8.py +240 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
- vllm/model_executor/layers/quantization/fp8.py +1333 -0
- vllm/model_executor/layers/quantization/fp_quant.py +420 -0
- vllm/model_executor/layers/quantization/gguf.py +643 -0
- vllm/model_executor/layers/quantization/gptq.py +393 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +789 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +371 -0
- vllm/model_executor/layers/quantization/inc.py +65 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +467 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +166 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +146 -0
- vllm/model_executor/layers/quantization/modelopt.py +1788 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +541 -0
- vllm/model_executor/layers/quantization/mxfp4.py +1162 -0
- vllm/model_executor/layers/quantization/petit.py +320 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +137 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +528 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +683 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +306 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
- vllm/model_executor/layers/quantization/rtn.py +652 -0
- vllm/model_executor/layers/quantization/schema.py +90 -0
- vllm/model_executor/layers/quantization/torchao.py +380 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +89 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +298 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +575 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +397 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +351 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +161 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +181 -0
- vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +63 -0
- vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
- vllm/model_executor/layers/resampler.py +283 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +278 -0
- vllm/model_executor/layers/rotary_embedding/base.py +235 -0
- vllm/model_executor/layers/rotary_embedding/common.py +188 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +81 -0
- vllm/model_executor/layers/utils.py +251 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
- vllm/model_executor/model_loader/__init__.py +148 -0
- vllm/model_executor/model_loader/base_loader.py +57 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
- vllm/model_executor/model_loader/default_loader.py +327 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +176 -0
- vllm/model_executor/model_loader/online_quantization.py +224 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +206 -0
- vllm/model_executor/model_loader/tensorizer.py +790 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
- vllm/model_executor/model_loader/tpu.py +118 -0
- vllm/model_executor/model_loader/utils.py +288 -0
- vllm/model_executor/model_loader/weight_utils.py +1084 -0
- vllm/model_executor/models/__init__.py +44 -0
- vllm/model_executor/models/adapters.py +543 -0
- vllm/model_executor/models/afmoe.py +711 -0
- vllm/model_executor/models/aimv2.py +247 -0
- vllm/model_executor/models/apertus.py +587 -0
- vllm/model_executor/models/arcee.py +439 -0
- vllm/model_executor/models/arctic.py +635 -0
- vllm/model_executor/models/aria.py +655 -0
- vllm/model_executor/models/aya_vision.py +450 -0
- vllm/model_executor/models/baichuan.py +496 -0
- vllm/model_executor/models/bailing_moe.py +646 -0
- vllm/model_executor/models/bamba.py +522 -0
- vllm/model_executor/models/bee.py +157 -0
- vllm/model_executor/models/bert.py +925 -0
- vllm/model_executor/models/bert_with_rope.py +732 -0
- vllm/model_executor/models/blip.py +349 -0
- vllm/model_executor/models/blip2.py +695 -0
- vllm/model_executor/models/bloom.py +390 -0
- vllm/model_executor/models/chameleon.py +1120 -0
- vllm/model_executor/models/chatglm.py +498 -0
- vllm/model_executor/models/clip.py +965 -0
- vllm/model_executor/models/cohere2_vision.py +472 -0
- vllm/model_executor/models/commandr.py +473 -0
- vllm/model_executor/models/config.py +503 -0
- vllm/model_executor/models/dbrx.py +482 -0
- vllm/model_executor/models/deepencoder.py +673 -0
- vllm/model_executor/models/deepseek_eagle.py +260 -0
- vllm/model_executor/models/deepseek_mtp.py +360 -0
- vllm/model_executor/models/deepseek_ocr.py +593 -0
- vllm/model_executor/models/deepseek_v2.py +1649 -0
- vllm/model_executor/models/deepseek_vl2.py +655 -0
- vllm/model_executor/models/dots1.py +574 -0
- vllm/model_executor/models/dots_ocr.py +900 -0
- vllm/model_executor/models/ernie45.py +53 -0
- vllm/model_executor/models/ernie45_moe.py +759 -0
- vllm/model_executor/models/ernie45_vl.py +1742 -0
- vllm/model_executor/models/ernie45_vl_moe.py +803 -0
- vllm/model_executor/models/ernie_mtp.py +279 -0
- vllm/model_executor/models/exaone.py +545 -0
- vllm/model_executor/models/exaone4.py +531 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +545 -0
- vllm/model_executor/models/falcon_h1.py +685 -0
- vllm/model_executor/models/flex_olmo.py +155 -0
- vllm/model_executor/models/fuyu.py +373 -0
- vllm/model_executor/models/gemma.py +426 -0
- vllm/model_executor/models/gemma2.py +439 -0
- vllm/model_executor/models/gemma3.py +571 -0
- vllm/model_executor/models/gemma3_mm.py +741 -0
- vllm/model_executor/models/gemma3n.py +1165 -0
- vllm/model_executor/models/gemma3n_mm.py +811 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4_1v.py +1821 -0
- vllm/model_executor/models/glm4_moe.py +747 -0
- vllm/model_executor/models/glm4_moe_mtp.py +359 -0
- vllm/model_executor/models/glm4v.py +784 -0
- vllm/model_executor/models/gpt2.py +397 -0
- vllm/model_executor/models/gpt_bigcode.py +339 -0
- vllm/model_executor/models/gpt_j.py +346 -0
- vllm/model_executor/models/gpt_neox.py +344 -0
- vllm/model_executor/models/gpt_oss.py +738 -0
- vllm/model_executor/models/granite.py +516 -0
- vllm/model_executor/models/granite_speech.py +913 -0
- vllm/model_executor/models/granitemoe.py +569 -0
- vllm/model_executor/models/granitemoehybrid.py +709 -0
- vllm/model_executor/models/granitemoeshared.py +333 -0
- vllm/model_executor/models/gritlm.py +245 -0
- vllm/model_executor/models/grok1.py +558 -0
- vllm/model_executor/models/h2ovl.py +554 -0
- vllm/model_executor/models/hunyuan_v1.py +1053 -0
- vllm/model_executor/models/hyperclovax_vision.py +1166 -0
- vllm/model_executor/models/idefics2_vision_model.py +426 -0
- vllm/model_executor/models/idefics3.py +717 -0
- vllm/model_executor/models/interfaces.py +1092 -0
- vllm/model_executor/models/interfaces_base.py +214 -0
- vllm/model_executor/models/intern_vit.py +453 -0
- vllm/model_executor/models/internlm2.py +460 -0
- vllm/model_executor/models/internlm2_ve.py +142 -0
- vllm/model_executor/models/interns1.py +830 -0
- vllm/model_executor/models/interns1_vit.py +432 -0
- vllm/model_executor/models/internvl.py +1452 -0
- vllm/model_executor/models/jais.py +397 -0
- vllm/model_executor/models/jamba.py +610 -0
- vllm/model_executor/models/jina_vl.py +147 -0
- vllm/model_executor/models/keye.py +1761 -0
- vllm/model_executor/models/keye_vl1_5.py +726 -0
- vllm/model_executor/models/kimi_linear.py +663 -0
- vllm/model_executor/models/kimi_vl.py +578 -0
- vllm/model_executor/models/lfm2.py +532 -0
- vllm/model_executor/models/lfm2_moe.py +762 -0
- vllm/model_executor/models/lightonocr.py +195 -0
- vllm/model_executor/models/llama.py +732 -0
- vllm/model_executor/models/llama4.py +859 -0
- vllm/model_executor/models/llama4_eagle.py +223 -0
- vllm/model_executor/models/llama_eagle.py +218 -0
- vllm/model_executor/models/llama_eagle3.py +367 -0
- vllm/model_executor/models/llava.py +842 -0
- vllm/model_executor/models/llava_next.py +583 -0
- vllm/model_executor/models/llava_next_video.py +467 -0
- vllm/model_executor/models/llava_onevision.py +923 -0
- vllm/model_executor/models/longcat_flash.py +749 -0
- vllm/model_executor/models/longcat_flash_mtp.py +349 -0
- vllm/model_executor/models/mamba.py +276 -0
- vllm/model_executor/models/mamba2.py +289 -0
- vllm/model_executor/models/medusa.py +179 -0
- vllm/model_executor/models/midashenglm.py +827 -0
- vllm/model_executor/models/mimo.py +188 -0
- vllm/model_executor/models/mimo_mtp.py +294 -0
- vllm/model_executor/models/minicpm.py +664 -0
- vllm/model_executor/models/minicpm3.py +242 -0
- vllm/model_executor/models/minicpm_eagle.py +389 -0
- vllm/model_executor/models/minicpmo.py +768 -0
- vllm/model_executor/models/minicpmv.py +1745 -0
- vllm/model_executor/models/minimax_m2.py +552 -0
- vllm/model_executor/models/minimax_text_01.py +1012 -0
- vllm/model_executor/models/minimax_vl_01.py +396 -0
- vllm/model_executor/models/mistral3.py +637 -0
- vllm/model_executor/models/mixtral.py +621 -0
- vllm/model_executor/models/mllama4.py +1147 -0
- vllm/model_executor/models/mlp_speculator.py +235 -0
- vllm/model_executor/models/modernbert.py +450 -0
- vllm/model_executor/models/module_mapping.py +74 -0
- vllm/model_executor/models/molmo.py +1555 -0
- vllm/model_executor/models/moonvit.py +677 -0
- vllm/model_executor/models/mpt.py +335 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1740 -0
- vllm/model_executor/models/nemotron.py +518 -0
- vllm/model_executor/models/nemotron_h.py +852 -0
- vllm/model_executor/models/nemotron_nas.py +491 -0
- vllm/model_executor/models/nemotron_vl.py +653 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +414 -0
- vllm/model_executor/models/olmo2.py +454 -0
- vllm/model_executor/models/olmoe.py +498 -0
- vllm/model_executor/models/openpangu.py +1062 -0
- vllm/model_executor/models/openpangu_mtp.py +265 -0
- vllm/model_executor/models/opt.py +426 -0
- vllm/model_executor/models/orion.py +372 -0
- vllm/model_executor/models/ouro.py +516 -0
- vllm/model_executor/models/ovis.py +559 -0
- vllm/model_executor/models/ovis2_5.py +673 -0
- vllm/model_executor/models/paddleocr_vl.py +1407 -0
- vllm/model_executor/models/paligemma.py +412 -0
- vllm/model_executor/models/persimmon.py +377 -0
- vllm/model_executor/models/phi.py +374 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3v.py +737 -0
- vllm/model_executor/models/phi4_multimodal.py +1447 -0
- vllm/model_executor/models/phi4mm.py +1253 -0
- vllm/model_executor/models/phi4mm_audio.py +1296 -0
- vllm/model_executor/models/phi4mm_utils.py +1907 -0
- vllm/model_executor/models/phimoe.py +675 -0
- vllm/model_executor/models/pixtral.py +1352 -0
- vllm/model_executor/models/plamo2.py +981 -0
- vllm/model_executor/models/qwen.py +368 -0
- vllm/model_executor/models/qwen2.py +541 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +1246 -0
- vllm/model_executor/models/qwen2_5_vl.py +1613 -0
- vllm/model_executor/models/qwen2_audio.py +473 -0
- vllm/model_executor/models/qwen2_moe.py +596 -0
- vllm/model_executor/models/qwen2_rm.py +123 -0
- vllm/model_executor/models/qwen2_vl.py +1670 -0
- vllm/model_executor/models/qwen3.py +336 -0
- vllm/model_executor/models/qwen3_moe.py +744 -0
- vllm/model_executor/models/qwen3_next.py +1395 -0
- vllm/model_executor/models/qwen3_next_mtp.py +296 -0
- vllm/model_executor/models/qwen3_omni_moe_thinker.py +1721 -0
- vllm/model_executor/models/qwen3_vl.py +1673 -0
- vllm/model_executor/models/qwen3_vl_moe.py +415 -0
- vllm/model_executor/models/qwen_vl.py +802 -0
- vllm/model_executor/models/radio.py +555 -0
- vllm/model_executor/models/registry.py +1155 -0
- vllm/model_executor/models/roberta.py +259 -0
- vllm/model_executor/models/rvl.py +107 -0
- vllm/model_executor/models/seed_oss.py +497 -0
- vllm/model_executor/models/siglip.py +1174 -0
- vllm/model_executor/models/siglip2navit.py +724 -0
- vllm/model_executor/models/skyworkr1v.py +953 -0
- vllm/model_executor/models/smolvlm.py +38 -0
- vllm/model_executor/models/solar.py +502 -0
- vllm/model_executor/models/stablelm.py +359 -0
- vllm/model_executor/models/starcoder2.py +367 -0
- vllm/model_executor/models/step3_text.py +559 -0
- vllm/model_executor/models/step3_vl.py +1148 -0
- vllm/model_executor/models/swin.py +514 -0
- vllm/model_executor/models/tarsier.py +619 -0
- vllm/model_executor/models/telechat2.py +153 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/terratorch.py +319 -0
- vllm/model_executor/models/transformers/__init__.py +127 -0
- vllm/model_executor/models/transformers/base.py +464 -0
- vllm/model_executor/models/transformers/causal.py +65 -0
- vllm/model_executor/models/transformers/legacy.py +90 -0
- vllm/model_executor/models/transformers/moe.py +318 -0
- vllm/model_executor/models/transformers/multimodal.py +411 -0
- vllm/model_executor/models/transformers/pooling.py +119 -0
- vllm/model_executor/models/transformers/utils.py +207 -0
- vllm/model_executor/models/ultravox.py +681 -0
- vllm/model_executor/models/utils.py +877 -0
- vllm/model_executor/models/vision.py +552 -0
- vllm/model_executor/models/voxtral.py +845 -0
- vllm/model_executor/models/whisper.py +959 -0
- vllm/model_executor/models/zamba2.py +986 -0
- vllm/model_executor/parameter.py +642 -0
- vllm/model_executor/utils.py +94 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
- vllm/model_executor/warmup/kernel_warmup.py +98 -0
- vllm/multimodal/__init__.py +40 -0
- vllm/multimodal/audio.py +118 -0
- vllm/multimodal/base.py +26 -0
- vllm/multimodal/cache.py +755 -0
- vllm/multimodal/evs.py +294 -0
- vllm/multimodal/hasher.py +106 -0
- vllm/multimodal/image.py +130 -0
- vllm/multimodal/inputs.py +1036 -0
- vllm/multimodal/parse.py +544 -0
- vllm/multimodal/processing.py +2186 -0
- vllm/multimodal/profiling.py +369 -0
- vllm/multimodal/registry.py +360 -0
- vllm/multimodal/utils.py +512 -0
- vllm/multimodal/video.py +306 -0
- vllm/outputs.py +345 -0
- vllm/platforms/__init__.py +277 -0
- vllm/platforms/cpu.py +414 -0
- vllm/platforms/cuda.py +657 -0
- vllm/platforms/interface.py +639 -0
- vllm/platforms/rocm.py +466 -0
- vllm/platforms/tpu.py +276 -0
- vllm/platforms/xpu.py +274 -0
- vllm/plugins/__init__.py +78 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +77 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
- vllm/pooling_params.py +228 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/gpu_profiler.py +37 -0
- vllm/profiler/layerwise_profile.py +392 -0
- vllm/profiler/utils.py +151 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +26 -0
- vllm/ray/ray_env.py +79 -0
- vllm/reasoning/__init__.py +92 -0
- vllm/reasoning/abs_reasoning_parsers.py +290 -0
- vllm/reasoning/basic_parsers.py +162 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
- vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
- vllm/reasoning/ernie45_reasoning_parser.py +165 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
- vllm/reasoning/gptoss_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
- vllm/reasoning/identity_reasoning_parser.py +58 -0
- vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
- vllm/reasoning/mistral_reasoning_parser.py +55 -0
- vllm/reasoning/olmo3_reasoning_parser.py +302 -0
- vllm/reasoning/qwen3_reasoning_parser.py +67 -0
- vllm/reasoning/seedoss_reasoning_parser.py +27 -0
- vllm/reasoning/step3_reasoning_parser.py +107 -0
- vllm/sampling_params.py +669 -0
- vllm/scalar_type.py +355 -0
- vllm/scripts.py +17 -0
- vllm/sequence.py +98 -0
- vllm/tasks.py +13 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +135 -0
- vllm/transformers_utils/__init__.py +26 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +73 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1203 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +70 -0
- vllm/transformers_utils/configs/afmoe.py +84 -0
- vllm/transformers_utils/configs/arctic.py +206 -0
- vllm/transformers_utils/configs/chatglm.py +75 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
- vllm/transformers_utils/configs/dotsocr.py +71 -0
- vllm/transformers_utils/configs/eagle.py +84 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/flex_olmo.py +77 -0
- vllm/transformers_utils/configs/jais.py +243 -0
- vllm/transformers_utils/configs/kimi_linear.py +144 -0
- vllm/transformers_utils/configs/kimi_vl.py +38 -0
- vllm/transformers_utils/configs/lfm2_moe.py +159 -0
- vllm/transformers_utils/configs/medusa.py +65 -0
- vllm/transformers_utils/configs/midashenglm.py +103 -0
- vllm/transformers_utils/configs/mistral.py +174 -0
- vllm/transformers_utils/configs/mlp_speculator.py +69 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +212 -0
- vllm/transformers_utils/configs/nemotron_h.py +282 -0
- vllm/transformers_utils/configs/olmo3.py +79 -0
- vllm/transformers_utils/configs/ovis.py +182 -0
- vllm/transformers_utils/configs/qwen3_next.py +274 -0
- vllm/transformers_utils/configs/radio.py +89 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +38 -0
- vllm/transformers_utils/configs/speculators/base.py +114 -0
- vllm/transformers_utils/configs/step3_vl.py +174 -0
- vllm/transformers_utils/configs/ultravox.py +118 -0
- vllm/transformers_utils/detokenizer_utils.py +198 -0
- vllm/transformers_utils/dynamic_module.py +59 -0
- vllm/transformers_utils/processor.py +402 -0
- vllm/transformers_utils/processors/__init__.py +15 -0
- vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
- vllm/transformers_utils/processors/ovis.py +453 -0
- vllm/transformers_utils/processors/ovis2_5.py +468 -0
- vllm/transformers_utils/runai_utils.py +104 -0
- vllm/transformers_utils/s3_utils.py +95 -0
- vllm/transformers_utils/tokenizer.py +293 -0
- vllm/transformers_utils/tokenizer_base.py +155 -0
- vllm/transformers_utils/tokenizers/__init__.py +16 -0
- vllm/transformers_utils/tokenizers/mistral.py +502 -0
- vllm/transformers_utils/utils.py +130 -0
- vllm/triton_utils/__init__.py +19 -0
- vllm/triton_utils/importing.py +103 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +294 -0
- vllm/utils/__init__.py +82 -0
- vllm/utils/argparse_utils.py +487 -0
- vllm/utils/async_utils.py +303 -0
- vllm/utils/cache.py +214 -0
- vllm/utils/collection_utils.py +139 -0
- vllm/utils/counter.py +45 -0
- vllm/utils/deep_gemm.py +391 -0
- vllm/utils/flashinfer.py +490 -0
- vllm/utils/func_utils.py +236 -0
- vllm/utils/gc_utils.py +147 -0
- vllm/utils/hashing.py +63 -0
- vllm/utils/import_utils.py +411 -0
- vllm/utils/jsontree.py +165 -0
- vllm/utils/math_utils.py +32 -0
- vllm/utils/mem_constants.py +13 -0
- vllm/utils/mem_utils.py +232 -0
- vllm/utils/nccl.py +64 -0
- vllm/utils/network_utils.py +331 -0
- vllm/utils/platform_utils.py +59 -0
- vllm/utils/profiling.py +56 -0
- vllm/utils/registry.py +49 -0
- vllm/utils/serial_utils.py +169 -0
- vllm/utils/system_utils.py +229 -0
- vllm/utils/tensor_schema.py +255 -0
- vllm/utils/torch_utils.py +657 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +496 -0
- vllm/v1/attention/backends/flash_attn.py +1028 -0
- vllm/v1/attention/backends/flashinfer.py +1572 -0
- vllm/v1/attention/backends/flex_attention.py +926 -0
- vllm/v1/attention/backends/gdn_attn.py +387 -0
- vllm/v1/attention/backends/linear_attn.py +74 -0
- vllm/v1/attention/backends/mamba1_attn.py +165 -0
- vllm/v1/attention/backends/mamba2_attn.py +354 -0
- vllm/v1/attention/backends/mamba_attn.py +115 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +2031 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +275 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +337 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +171 -0
- vllm/v1/attention/backends/mla/flashmla.py +314 -0
- vllm/v1/attention/backends/mla/flashmla_sparse.py +548 -0
- vllm/v1/attention/backends/mla/indexer.py +362 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +294 -0
- vllm/v1/attention/backends/mla/triton_mla.py +171 -0
- vllm/v1/attention/backends/pallas.py +436 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +816 -0
- vllm/v1/attention/backends/rocm_aiter_unified_attn.py +196 -0
- vllm/v1/attention/backends/rocm_attn.py +362 -0
- vllm/v1/attention/backends/short_conv_attn.py +105 -0
- vllm/v1/attention/backends/tree_attn.py +425 -0
- vllm/v1/attention/backends/triton_attn.py +373 -0
- vllm/v1/attention/backends/utils.py +1116 -0
- vllm/v1/attention/backends/xformers.py +417 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +428 -0
- vllm/v1/core/encoder_cache_manager.py +343 -0
- vllm/v1/core/kv_cache_coordinator.py +480 -0
- vllm/v1/core/kv_cache_manager.py +420 -0
- vllm/v1/core/kv_cache_utils.py +1340 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +62 -0
- vllm/v1/core/sched/interface.py +181 -0
- vllm/v1/core/sched/output.py +202 -0
- vllm/v1/core/sched/request_queue.py +221 -0
- vllm/v1/core/sched/scheduler.py +1617 -0
- vllm/v1/core/sched/utils.py +72 -0
- vllm/v1/core/single_type_kv_cache_manager.py +736 -0
- vllm/v1/cudagraph_dispatcher.py +148 -0
- vllm/v1/engine/__init__.py +206 -0
- vllm/v1/engine/async_llm.py +797 -0
- vllm/v1/engine/coordinator.py +377 -0
- vllm/v1/engine/core.py +1420 -0
- vllm/v1/engine/core_client.py +1400 -0
- vllm/v1/engine/detokenizer.py +351 -0
- vllm/v1/engine/exceptions.py +18 -0
- vllm/v1/engine/llm_engine.py +408 -0
- vllm/v1/engine/logprobs.py +182 -0
- vllm/v1/engine/output_processor.py +642 -0
- vllm/v1/engine/parallel_sampling.py +145 -0
- vllm/v1/engine/processor.py +621 -0
- vllm/v1/engine/utils.py +1072 -0
- vllm/v1/executor/__init__.py +6 -0
- vllm/v1/executor/abstract.py +352 -0
- vllm/v1/executor/multiproc_executor.py +877 -0
- vllm/v1/executor/ray_distributed_executor.py +8 -0
- vllm/v1/executor/ray_executor.py +626 -0
- vllm/v1/executor/ray_utils.py +465 -0
- vllm/v1/executor/uniproc_executor.py +183 -0
- vllm/v1/kv_cache_interface.py +403 -0
- vllm/v1/kv_offload/__init__.py +0 -0
- vllm/v1/kv_offload/abstract.py +161 -0
- vllm/v1/kv_offload/arc_manager.py +237 -0
- vllm/v1/kv_offload/backend.py +97 -0
- vllm/v1/kv_offload/backends/__init__.py +0 -0
- vllm/v1/kv_offload/backends/cpu.py +62 -0
- vllm/v1/kv_offload/cpu.py +93 -0
- vllm/v1/kv_offload/factory.py +56 -0
- vllm/v1/kv_offload/lru_manager.py +139 -0
- vllm/v1/kv_offload/mediums.py +39 -0
- vllm/v1/kv_offload/spec.py +62 -0
- vllm/v1/kv_offload/worker/__init__.py +0 -0
- vllm/v1/kv_offload/worker/cpu_gpu.py +185 -0
- vllm/v1/kv_offload/worker/worker.py +144 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +1238 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +169 -0
- vllm/v1/metrics/reader.py +257 -0
- vllm/v1/metrics/stats.py +420 -0
- vllm/v1/outputs.py +249 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +82 -0
- vllm/v1/request.py +259 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +352 -0
- vllm/v1/sample/logits_processor/builtin.py +274 -0
- vllm/v1/sample/logits_processor/interface.py +106 -0
- vllm/v1/sample/logits_processor/state.py +165 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +52 -0
- vllm/v1/sample/ops/logprobs.py +25 -0
- vllm/v1/sample/ops/penalties.py +57 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +290 -0
- vllm/v1/sample/rejection_sampler.py +793 -0
- vllm/v1/sample/sampler.py +316 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +120 -0
- vllm/v1/sample/tpu/sampler.py +215 -0
- vllm/v1/serial_utils.py +532 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +1225 -0
- vllm/v1/spec_decode/medusa.py +73 -0
- vllm/v1/spec_decode/metadata.py +66 -0
- vllm/v1/spec_decode/metrics.py +224 -0
- vllm/v1/spec_decode/ngram_proposer.py +291 -0
- vllm/v1/spec_decode/suffix_decoding.py +103 -0
- vllm/v1/spec_decode/utils.py +16 -0
- vllm/v1/structured_output/__init__.py +338 -0
- vllm/v1/structured_output/backend_guidance.py +265 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
- vllm/v1/structured_output/backend_outlines.py +324 -0
- vllm/v1/structured_output/backend_types.py +136 -0
- vllm/v1/structured_output/backend_xgrammar.py +362 -0
- vllm/v1/structured_output/request.py +94 -0
- vllm/v1/structured_output/utils.py +469 -0
- vllm/v1/utils.py +414 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +327 -0
- vllm/v1/worker/cpu_model_runner.py +122 -0
- vllm/v1/worker/cpu_worker.py +206 -0
- vllm/v1/worker/dp_utils.py +230 -0
- vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
- vllm/v1/worker/gpu_input_batch.py +975 -0
- vllm/v1/worker/gpu_model_runner.py +5102 -0
- vllm/v1/worker/gpu_ubatch_wrapper.py +466 -0
- vllm/v1/worker/gpu_worker.py +894 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +144 -0
- vllm/v1/worker/lora_model_runner_mixin.py +213 -0
- vllm/v1/worker/tpu_input_batch.py +593 -0
- vllm/v1/worker/tpu_model_runner.py +2173 -0
- vllm/v1/worker/tpu_worker.py +355 -0
- vllm/v1/worker/ubatch_utils.py +73 -0
- vllm/v1/worker/ubatching.py +231 -0
- vllm/v1/worker/utils.py +366 -0
- vllm/v1/worker/worker_base.py +375 -0
- vllm/v1/worker/xpu_model_runner.py +55 -0
- vllm/v1/worker/xpu_worker.py +189 -0
- vllm/version.py +39 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/METADATA +345 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/RECORD +1536 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/WHEEL +5 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1162 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from enum import Enum
from typing import Optional

import torch
from torch.nn.parameter import Parameter

from vllm import envs
from vllm.config import get_current_vllm_config
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe import (
    FusedMoE,
    FusedMoEConfig,
    FusedMoEMethodBase,
)
from vllm.model_executor.layers.fused_moe import modular_kernel as mk
from vllm.model_executor.layers.fused_moe.config import (
    FusedMoEQuantConfig,
    mxfp4_mxfp8_moe_quant_config,
    mxfp4_w4a16_moe_quant_config,
    ocp_mx_moe_quant_config,
)
from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
    BatchedMarlinExperts,
    MarlinExperts,
    fused_marlin_moe,
)
from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import (
    OAITritonExperts,
)
from vllm.model_executor.layers.fused_moe.trtllm_moe import TrtLlmGenExperts
from vllm.model_executor.layers.linear import LinearBase, UnquantizedLinearMethod
from vllm.model_executor.layers.quantization import QuantizationMethods
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig,
    QuantizeMethodBase,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
    prepare_moe_fp4_layer_for_marlin,
)
from vllm.model_executor.layers.quantization.utils.mxfp4_utils import (
    _can_support_mxfp4,
    _swizzle_mxfp4,
    get_padding_alignment,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import is_layer_skipped
from vllm.model_executor.utils import set_weight_attrs
from vllm.platforms import current_platform
from vllm.scalar_type import scalar_types
from vllm.utils.flashinfer import has_flashinfer
from vllm.utils.import_utils import has_triton_kernels
from vllm.utils.math_utils import round_up
from vllm.utils.torch_utils import is_torch_equal_or_newer

logger = init_logger(__name__)


# enum for mxfp4 backend
class Mxfp4Backend(Enum):
    NONE = 0

    # FlashInfer Backend
    SM100_FI_MXFP4_MXFP8_TRTLLM = 1
    SM100_FI_MXFP4_MXFP8_CUTLASS = 2
    SM100_FI_MXFP4_BF16 = 3
    SM90_FI_MXFP4_BF16 = 4

    # Marlin Backend
    MARLIN = 5

    # Triton Backend
    TRITON = 6


def get_mxfp4_backend_with_lora() -> Mxfp4Backend:
    """
    Not all MXFP4 backends support LoRA. Select backends that are known to
    have LoRA support.
    """
    if not current_platform.is_cuda():
        return Mxfp4Backend.NONE

    logger.info_once("[get_mxfp4_backend_with_lora] Using Marlin backend")
    return Mxfp4Backend.MARLIN


def get_mxfp4_backend(with_lora_support: bool) -> Mxfp4Backend:
    # Backend Selection

    if with_lora_support:
        return get_mxfp4_backend_with_lora()

    if current_platform.is_cuda():
        if (
            current_platform.is_device_capability(90)
            and has_flashinfer()
            and envs.VLLM_USE_FLASHINFER_MOE_MXFP4_BF16
        ):
            logger.info_once("Using FlashInfer MXFP4 BF16 backend for SM90")
            return Mxfp4Backend.SM90_FI_MXFP4_BF16
        elif (
            current_platform.is_device_capability(100)
            and has_flashinfer()
            and envs.VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS
        ):
            logger.info_once("Using FlashInfer MXFP4 MXFP8 CUTLASS backend for SM100")
            return Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS
        elif (
            current_platform.is_device_capability(100)
            and has_flashinfer()
            and envs.VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8
        ):
            return Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
        elif current_platform.is_device_capability(100) and has_flashinfer():
            logger.info_once(
                "Using FlashInfer MXFP4 BF16 backend for SM100. "
                "For faster performance on SM100, consider setting "
                "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8=1, though this may impact "
                "accuracy."
            )
            return Mxfp4Backend.SM100_FI_MXFP4_BF16
        elif (
            current_platform.is_device_capability(100)
            or current_platform.is_device_capability(90)
        ) and not has_flashinfer():
            logger.warning_once(
                "MXFP4 MoE is enabled on Hopper/Blackwell but FlashInfer "
                "is not available. This may result in degraded performance. "
                "Please `pip install vllm[flashinfer]` for best results."
            )

        # If FlashInfer is not available, try either Marlin or Triton
        if (
            envs.VLLM_MXFP4_USE_MARLIN
            or current_platform.get_device_capability()[0] < 9
            or not has_triton_kernels()
            or not is_torch_equal_or_newer("2.8.0")
        ):
            logger.info_once("Using Marlin backend")
            return Mxfp4Backend.MARLIN
        else:
            logger.info_once("Using Triton backend")
            return Mxfp4Backend.TRITON
    elif current_platform.is_xpu():
        logger.info_once("Using ipex marlin backend on XPU")
        return Mxfp4Backend.MARLIN
    elif current_platform.is_rocm() and has_triton_kernels():
        logger.info_once("Using Triton backend")
        return Mxfp4Backend.TRITON

    return Mxfp4Backend.NONE


class Mxfp4Config(QuantizationConfig):
    def __init__(self, ignored_layers: list[str] | None = None):
        super().__init__()
        self.ignored_layers = ignored_layers

    @classmethod
    def from_config(cls, config):
        return cls()

    @classmethod
    def get_min_capability(cls) -> int:
        return 80

    @classmethod
    def get_name(cls) -> QuantizationMethods:
        return "mxfp4"

    @classmethod
    def get_supported_act_dtypes(cls) -> list[torch.dtype]:
        return [torch.bfloat16]

    @classmethod
    def get_config_filenames(cls) -> list[str]:
        return []

    def get_quant_method(
        self, layer: torch.nn.Module, prefix: str
    ) -> Optional["QuantizeMethodBase"]:
        from vllm.attention.layer import Attention  # Avoid circular import

        if isinstance(layer, LinearBase):
            if self.ignored_layers and is_layer_skipped(
                prefix=prefix,
                ignored_layers=self.ignored_layers,
                fused_mapping=self.packed_modules_mapping,
            ):
                return UnquantizedLinearMethod()
            # TODO: Add support for MXFP4 Linear Method.
            # MXFP4 LinearMethod is available in AMD-Quark; refer to that
            # implementation if you are interested in enabling MXFP4 here.
            logger.warning_once(
                "MXFP4 linear layer is not implemented - falling back to "
                "UnquantizedLinearMethod."
            )
            return UnquantizedLinearMethod()
        elif isinstance(layer, FusedMoE):
            if current_platform.is_xpu():
                return IpexMxfp4MoEMethod(layer.moe_config)
            else:
                return Mxfp4MoEMethod(layer.moe_config)
        elif isinstance(layer, Attention):
            # TODO: Add support for MXFP4 Attention.
            logger.warning_once(
                "MXFP4 attention layer is not implemented. "
                "Skipping quantization for this layer."
            )
        return None


class Mxfp4MoEMethod(FusedMoEMethodBase):
    def __init__(self, moe: FusedMoEConfig):
        super().__init__(moe)
        self.mxfp4_backend = get_mxfp4_backend(moe.is_lora_enabled)
        self.use_marlin = self.mxfp4_backend == Mxfp4Backend.MARLIN
        self.max_capture_size = (
            get_current_vllm_config().compilation_config.max_cudagraph_capture_size
        )

        assert self.mxfp4_backend != Mxfp4Backend.NONE, (
            f"get_mxfp4_backend(with_lora_support={moe.is_lora_enabled}) found "
            "no compatible MXFP4 MoE backend (FlashInfer/Marlin/Triton). "
            "Please check your environment and try again."
        )
        self._cache_permute_indices: dict[torch.Size, torch.Tensor] = {}

    def create_weights(
        self,
        layer: torch.nn.Module,
        num_experts: int,
        hidden_size: int,
        intermediate_size_per_partition: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        self.num_experts = num_experts
        weight_dtype = torch.uint8
        scale_dtype = torch.uint8

        # FIXME (zyongye): ship after torch and safetensors support mxfp4
        # is_torch_mxfp4_available = (
        #     hasattr(torch, "float4_e2m1fn_x2") and
        #     hasattr(torch, "float8_e8m0fnu"))
        # if is_torch_mxfp4_available:
        #     weight_dtype = torch.float4_e2m1fn_x2
        #     scale_dtype = torch.float8_e8m0fnu

        mxfp4_block = 32

        intermediate_size_per_partition_after_pad = intermediate_size_per_partition
        if self.mxfp4_backend == Mxfp4Backend.MARLIN:
            # The moe marlin kernel requires that for each linear
            # n % 256 == 0 and k % 128 == 0.
            # In gate_up_proj:
            #   n = 2 * intermediate_size_per_partition_after_pad
            #   k = hidden_size
            # In down_proj:
            #   n = hidden_size
            #   k = intermediate_size_per_partition_after_pad
            intermediate_size_per_partition_after_pad = round_up(
                intermediate_size_per_partition, 128
            )
            if current_platform.is_xpu():
                hidden_size = round_up(hidden_size, 128)
            else:
                hidden_size = round_up(hidden_size, 256)
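            # Illustrative arithmetic (hypothetical shard sizes, not taken
            # from any particular checkpoint): with
            # intermediate_size_per_partition = 2880 and hidden_size = 2880,
            # round_up(2880, 128) == 2944 and round_up(2880, 256) == 3072.
            # gate_up_proj then has n = 2 * 2944 = 5888 (a multiple of 256)
            # and k = 3072 (a multiple of 128), and down_proj has n = 3072 and
            # k = 2944, so both linears satisfy the Marlin constraints above.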

            layer.params_dtype = params_dtype
            layer.num_experts = num_experts
            layer.hidden_size = hidden_size
            layer.intermediate_size_per_partition = (
                intermediate_size_per_partition_after_pad
            )
        elif (
            self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
            or self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16
        ):
            # Pad the intermediate size to be a multiple of 2 * mxfp4_block so
            # it can hold non-uniformly sharded tensors as well as the
            # swizzled layout; the extra padding also improves performance.
            intermediate_size_per_partition_after_pad = round_up(
                intermediate_size_per_partition, 256
            )
            hidden_size = round_up(hidden_size, 256)
        elif (
            self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS
            or self.mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16
        ):
            intermediate_size_per_partition_after_pad = round_up(
                intermediate_size_per_partition, 128
            )
            hidden_size = round_up(hidden_size, 128)
        elif current_platform.is_rocm():
            pad_align = get_padding_alignment()
            intermediate_size_per_partition_after_pad = round_up(
                intermediate_size_per_partition, pad_align
            )
            hidden_size = round_up(hidden_size, pad_align)
        else:
            intermediate_size_per_partition_after_pad = round_up(
                intermediate_size_per_partition, 64
            )

        self.intermediate_size = intermediate_size_per_partition_after_pad
        self.hidden_size = hidden_size
        # Fused gate_up_proj (column parallel)
        w13_weight = torch.nn.Parameter(
            torch.zeros(
                num_experts,
                2 * intermediate_size_per_partition_after_pad,
                hidden_size // 2,
                dtype=weight_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_weight", w13_weight)
        set_weight_attrs(w13_weight, extra_weight_attrs)

        w13_weight_scale = torch.nn.Parameter(
            torch.zeros(
                num_experts,
                2 * intermediate_size_per_partition_after_pad,
                hidden_size // mxfp4_block,
                dtype=scale_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_weight_scale", w13_weight_scale)
        set_weight_attrs(w13_weight_scale, extra_weight_attrs)

        w13_bias = torch.nn.Parameter(
            torch.zeros(
                num_experts,
                2 * intermediate_size_per_partition_after_pad,
                dtype=torch.bfloat16,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_bias", w13_bias)
        set_weight_attrs(w13_bias, extra_weight_attrs)

        # down_proj (row parallel)
        w2_weight = torch.nn.Parameter(
            torch.zeros(
                num_experts,
                hidden_size,
                intermediate_size_per_partition_after_pad // 2,
                dtype=weight_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_weight", w2_weight)
        set_weight_attrs(w2_weight, extra_weight_attrs)

        w2_weight_scale = torch.nn.Parameter(
            torch.zeros(
                num_experts,
                hidden_size,
                intermediate_size_per_partition_after_pad // mxfp4_block,
                dtype=scale_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_weight_scale", w2_weight_scale)
        set_weight_attrs(w2_weight_scale, extra_weight_attrs)

        w2_bias = torch.nn.Parameter(
            torch.zeros(
                num_experts,
                hidden_size,
                dtype=torch.bfloat16,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_bias", w2_bias)
        set_weight_attrs(w2_bias, extra_weight_attrs)
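
        # Packing note (illustrative): MXFP4 stores two e2m1 values per uint8
        # byte and one shared e8m0 scale per mxfp4_block (= 32) values along
        # the reduction dimension, which is why the weight shapes above end in
        # `... // 2` and the scale shapes end in `... // mxfp4_block`.  For a
        # hypothetical hidden_size of 4096, each w13 row holds
        # 4096 // 2 = 2048 packed weight bytes and 4096 // 32 = 128 scale
        # bytes.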

    def process_weights_after_loading(self, layer):
        if self.mxfp4_backend == Mxfp4Backend.MARLIN:
            prepare_moe_fp4_layer_for_marlin(layer)
        elif (
            self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
            or self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16
        ):
            from flashinfer.fp4_quantization import nvfp4_block_scale_interleave
            from flashinfer.fused_moe.core import get_w2_permute_indices_with_cache

            layer.gemm1_alpha = Parameter(
                torch.tensor([1.702] * self.num_experts, dtype=torch.float32).cuda(),
                requires_grad=False,
            )
            layer.gemm1_beta = Parameter(
                torch.tensor([1.0] * self.num_experts, dtype=torch.float32).cuda(),
                requires_grad=False,
            )
            layer.gemm1_clamp_limit = Parameter(
                torch.tensor([7.0] * self.num_experts, dtype=torch.float32).cuda(),
                requires_grad=False,
            )
            sf_block_size = 32  # mxfp4 block size

            assert (
                layer.w13_weight.dim() == 3
                and layer.w13_weight.shape[0] == self.num_experts
                and layer.w13_weight.shape[1] == self.intermediate_size * 2
                and layer.w13_weight.shape[2] == self.hidden_size // 2
            )
            assert (
                layer.w13_weight_scale.dim() == 3
                and layer.w13_weight_scale.shape[0] == self.num_experts
                and layer.w13_weight_scale.shape[1] == self.intermediate_size * 2
                and layer.w13_weight_scale.shape[2] == self.hidden_size // sf_block_size
            )
            assert (
                layer.w2_weight.dim() == 3
                and layer.w2_weight.shape[0] == self.num_experts
                and layer.w2_weight.shape[1] == self.hidden_size
                and layer.w2_weight.shape[2] == self.intermediate_size // 2
            )
            assert (
                layer.w2_weight_scale.dim() == 3
                and layer.w2_weight_scale.shape[1] == self.hidden_size
                and layer.w2_weight_scale.shape[2]
                == self.intermediate_size // sf_block_size
            )
            assert (
                layer.w13_bias.dim() == 2
                and layer.w13_bias.shape[0] == self.num_experts
                and layer.w13_bias.shape[1] == self.intermediate_size * 2
            )
            assert (
                layer.w2_bias.dim() == 2
                and layer.w2_bias.shape[0] == self.num_experts
                and layer.w2_bias.shape[1] == self.hidden_size
            )

            w13_weight_scale = layer.w13_weight_scale.data
            w2_weight_scale = layer.w2_weight_scale.data
            w13_weight = layer.w13_weight.data
            w2_weight = layer.w2_weight.data
            w13_bias = layer.w13_bias.data.to(torch.float32)
            w2_bias = layer.w2_bias.data.to(torch.float32)

            # Swap w1 and w3 as the definition of
            # swiglu is different in the trtllm-gen
            def swap_every_two_rows(x, axis=-1):
                shape = x.shape
                if axis < 0:
                    axis = len(shape) + axis

                # Create a new shape with pairs swapped along specified axis
                new_shape = list(shape)
                new_shape[axis] = shape[axis] // 2
                new_shape.insert(axis + 1, 2)

                # Reshape to expose pairs, swap them, and reshape back
                x = x.reshape(*new_shape)
                x = x.flip(axis + 1)
                new_shape = list(shape)
                return x.reshape(*new_shape)
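
            # Behaviour sketch (illustrative): along `axis`, adjacent pairs of
            # rows are exchanged, e.g. a hypothetical tensor whose rows are
            # [r0, r1, r2, r3] becomes [r1, r0, r3, r2] for axis=-2, which is
            # the w1/w3 swap applied below.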

            w13_weight_scale = swap_every_two_rows(w13_weight_scale, -2)
            w13_weight = swap_every_two_rows(w13_weight, -2)
            w13_bias = swap_every_two_rows(w13_bias, -1)

            # Do not interleave as the checkpoint is already interleaved

            # Shuffle weights and scaling factors for transposed mma output
            gemm1_weights_mxfp4_shuffled = []
            gemm1_scales_mxfp4_shuffled = []
            gemm2_weights_mxfp4_shuffled = []
            gemm2_scales_mxfp4_shuffled = []
            gemm1_bias_shuffled = []
            gemm2_bias_shuffled = []
            epilogue_tile_m = 128  # FIXME: this depends on the kernel internals
            for i in range(self.num_experts):
                # w13 weight shuffling
                permute_indices = get_w2_permute_indices_with_cache(
                    self._cache_permute_indices,
                    w13_weight[i].view(torch.uint8),
                    epilogue_tile_m,
                )
                gemm1_weights_mxfp4_shuffled.append(
                    w13_weight[i]
                    .view(torch.uint8)[permute_indices.to(w13_weight.device)]
                    .contiguous()
                )
                # w13 scale shuffling
                permute_sf_indices = get_w2_permute_indices_with_cache(
                    self._cache_permute_indices,
                    w13_weight_scale[i].view(torch.uint8),
                    epilogue_tile_m,
                    num_elts_per_sf=16,
                )
                gemm1_scales_mxfp4_shuffled.append(
                    nvfp4_block_scale_interleave(
                        w13_weight_scale[i]
                        .view(torch.uint8)[
                            permute_sf_indices.to(w13_weight_scale.device)
                        ]
                        .contiguous()
                    )
                )
                # w13 bias shuffling
                permute_bias_indices = get_w2_permute_indices_with_cache(
                    self._cache_permute_indices,
                    w13_bias[i].clone().reshape(-1, 1),
                    epilogue_tile_m,
                )
                gemm1_bias_shuffled.append(
                    w13_bias[i]
                    .clone()
                    .reshape(-1, 1)[permute_bias_indices.to(w13_bias.device)]
                    .contiguous()
                )
                # w2 weight shuffling
                permute_indices = get_w2_permute_indices_with_cache(
                    self._cache_permute_indices,
                    w2_weight[i].view(torch.uint8),
                    epilogue_tile_m,
                )
                gemm2_weights_mxfp4_shuffled.append(
                    w2_weight[i]
                    .view(torch.uint8)[permute_indices.to(w2_weight.device)]
                    .contiguous()
                )
                # w2 scale shuffling
                permute_sf_indices = get_w2_permute_indices_with_cache(
                    self._cache_permute_indices,
                    w2_weight_scale[i].view(torch.uint8),
                    epilogue_tile_m,
                    num_elts_per_sf=16,
                )
                gemm2_scales_mxfp4_shuffled.append(
                    nvfp4_block_scale_interleave(
                        w2_weight_scale[i]
                        .view(torch.uint8)[
                            permute_sf_indices.to(w2_weight_scale.device)
                        ]
                        .contiguous()
                    )
                )
                # w2 bias shuffling
                permute_indices = get_w2_permute_indices_with_cache(
                    self._cache_permute_indices,
                    w2_bias[i].clone().reshape(-1, 1),
                    epilogue_tile_m,
                )
                gemm2_bias_shuffled.append(
                    w2_bias[i]
                    .clone()
                    .reshape(-1, 1)[permute_indices.to(w2_bias.device)]
                    .contiguous()
                )

            w13_weight = torch.stack(gemm1_weights_mxfp4_shuffled)
            w13_weight_scale = (
                torch.stack(gemm1_scales_mxfp4_shuffled)
                .reshape(
                    self.num_experts,
                    2 * self.intermediate_size,
                    self.hidden_size // sf_block_size,
                )
                .view(torch.float8_e4m3fn)
            )

            w2_weight = torch.stack(gemm2_weights_mxfp4_shuffled)
            w2_weight_scale = (
                torch.stack(gemm2_scales_mxfp4_shuffled)
                .reshape(
                    self.num_experts,
                    self.hidden_size,
                    self.intermediate_size // sf_block_size,
                )
                .view(torch.float8_e4m3fn)
            )

            layer.w13_weight = Parameter(w13_weight, requires_grad=False)
            layer.w13_weight_scale = Parameter(w13_weight_scale, requires_grad=False)
            layer.w2_weight = Parameter(w2_weight, requires_grad=False)
            layer.w2_weight_scale = Parameter(w2_weight_scale, requires_grad=False)
            layer.w13_bias = Parameter(
                torch.stack(gemm1_bias_shuffled).reshape(self.num_experts, -1),
                requires_grad=False,
            )
            layer.w2_bias = Parameter(
                torch.stack(gemm2_bias_shuffled).reshape(self.num_experts, -1),
                requires_grad=False,
            )
        elif (
            self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS
            or self.mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16
        ):
            layer.gemm1_alpha = Parameter(
                torch.tensor([1.702] * self.num_experts, dtype=torch.float32).cuda(),
                requires_grad=False,
            )
            layer.gemm1_beta = Parameter(
                torch.tensor([1.0] * self.num_experts, dtype=torch.float32).cuda(),
                requires_grad=False,
            )
            layer.gemm1_clamp_limit = Parameter(
                torch.tensor([7.0] * self.num_experts, dtype=torch.float32).cuda(),
                requires_grad=False,
            )

            sf_block_size = 32  # mxfp4 block size

            # Common shape assertions
            assert (
                layer.w13_weight.dim() == 3
                and layer.w13_weight.shape[0] == self.num_experts
                and layer.w13_weight.shape[1] == self.intermediate_size * 2
                and layer.w13_weight.shape[2] == self.hidden_size // 2
            )
            assert (
                layer.w13_weight_scale.dim() == 3
                and layer.w13_weight_scale.shape[0] == self.num_experts
                and layer.w13_weight_scale.shape[1] == self.intermediate_size * 2
                and layer.w13_weight_scale.shape[2] == self.hidden_size // sf_block_size
            )
            assert (
                layer.w2_weight.dim() == 3
                and layer.w2_weight.shape[0] == self.num_experts
                and layer.w2_weight.shape[1] == self.hidden_size
                and layer.w2_weight.shape[2] == self.intermediate_size // 2
            )
            assert (
                layer.w2_weight_scale.dim() == 3
                and layer.w2_weight_scale.shape[1] == self.hidden_size
                and layer.w2_weight_scale.shape[2]
                == self.intermediate_size // sf_block_size
            )
            assert (
                layer.w13_bias.dim() == 2
                and layer.w13_bias.shape[0] == self.num_experts
                and layer.w13_bias.shape[1] == self.intermediate_size * 2
            )
            assert (
                layer.w2_bias.dim() == 2
                and layer.w2_bias.shape[0] == self.num_experts
                and layer.w2_bias.shape[1] == self.hidden_size
            )

            # De-interleave and swap for w13 weight, bias, and scales
            w13_w = layer.w13_weight.data
            gate_w, up_w = w13_w[:, ::2, :], w13_w[:, 1::2, :]
            deinterleaved_w13_w = torch.cat([gate_w, up_w], dim=1)
            w1_w, w3_w = torch.chunk(deinterleaved_w13_w, 2, dim=1)
            w13_weight_swapped = torch.cat([w3_w, w1_w], dim=1)
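
            # Illustrative effect on a hypothetical 4-row slice ordered
            # [g0, u0, g1, u1]: the strided views pick out gate rows [g0, g1]
            # and up rows [u0, u1], the concat gives [g0, g1, u0, u1], and the
            # chunk-and-swap yields [u0, u1, g0, g1], i.e. the w3 (up) rows
            # placed ahead of the w1 (gate) rows.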

            w13_b = layer.w13_bias.data.to(torch.float32)
            gate_b, up_b = w13_b[:, ::2], w13_b[:, 1::2]
            deinterleaved_w13_b = torch.cat([gate_b, up_b], dim=1)
            b1, b3 = torch.chunk(deinterleaved_w13_b, 2, dim=-1)
            w13_bias_swapped = torch.cat([b3, b1], dim=-1).to(torch.bfloat16)

            w13_s = layer.w13_weight_scale.data
            gate_s, up_s = w13_s[:, ::2, :], w13_s[:, 1::2, :]
            deinterleaved_w13_s = torch.cat([gate_s, up_s], dim=1)
            s1, s3 = torch.chunk(deinterleaved_w13_s, 2, dim=1)
            w13_scale_swapped = torch.cat([s3, s1], dim=1)

            if self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS:
                from flashinfer import block_scale_interleave

                orig_shape = w13_scale_swapped.shape
                w13_scale_interleaved = block_scale_interleave(
                    w13_scale_swapped.view(torch.uint8)
                ).reshape(orig_shape)

                w2_s = layer.w2_weight_scale.data
                orig_shape = w2_s.shape
                w2_scale_interleaved = block_scale_interleave(
                    w2_s.view(torch.uint8)
                ).reshape(orig_shape)

                layer.w13_weight = Parameter(w13_weight_swapped, requires_grad=False)
                layer.w13_weight_scale = Parameter(
                    w13_scale_interleaved, requires_grad=False
                )
                layer.w13_bias = Parameter(w13_bias_swapped, requires_grad=False)
                layer.w2_weight_scale = Parameter(
                    w2_scale_interleaved, requires_grad=False
                )
            elif self.mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16:

                def _interleave_mxfp4_cutlass_sm90(w):
                    w_shape = w.shape
                    w_interleaved = w.reshape(
                        w_shape[0], w_shape[1], (w_shape[2] // 4), 4
                    )
                    w_interleaved = w_interleaved.permute(0, 2, 1, 3)
                    w_interleaved = w_interleaved.reshape(
                        w_shape[0], w_shape[2] // 4, w_shape[1] * 4
                    )
                    return w_interleaved

                w31_scales = w13_scale_swapped.to(torch.uint8).view(torch.uint8)
                w31_scales_interleaved = _interleave_mxfp4_cutlass_sm90(w31_scales)

                w2_weight_scale = layer.w2_weight_scale.data
                w2_scales = w2_weight_scale.to(torch.uint8).view(torch.uint8)
                w2_scales_interleaved = _interleave_mxfp4_cutlass_sm90(w2_scales)

                layer.w13_weight = torch.nn.Parameter(
                    torch.cat([w3_w, w1_w], dim=1), requires_grad=False
                )
                layer.w13_bias = torch.nn.Parameter(
                    w13_bias_swapped, requires_grad=False
                )
                layer.w13_weight_scale = torch.nn.Parameter(
                    w31_scales_interleaved, requires_grad=False
                )
                layer.w2_weight_scale = torch.nn.Parameter(
                    w2_scales_interleaved, requires_grad=False
                )
        elif self.mxfp4_backend == Mxfp4Backend.TRITON:
            from triton_kernels.matmul_ogs import FlexCtx, PrecisionConfig

            w13_bias = layer.w13_bias.to(torch.float32)
            w2_bias = layer.w2_bias.to(torch.float32)

            layer.w13_bias = Parameter(w13_bias, requires_grad=False)
            layer.w2_bias = Parameter(w2_bias, requires_grad=False)

            # Ideally we'd use the FusedMoEModularKernel.prepare_finalize
            # object (stored in self.fused_experts) to determine if the MoE
            # has a batched activation format. As self.fused_experts is not
            # initialized at this point, we resort to checking the MoE config
            # directly.
            is_batched_moe = self.moe.use_pplx_kernels or self.moe.use_deepep_ll_kernels
            if is_batched_moe:
                num_warps = 4 if envs.VLLM_MOE_DP_CHUNK_SIZE <= 512 else 8
            else:
                num_warps = 8

            w13_weight, w13_flex, w13_scale = _swizzle_mxfp4(
                layer.w13_weight, layer.w13_weight_scale, num_warps
            )
            w2_weight, w2_flex, w2_scale = _swizzle_mxfp4(
                layer.w2_weight, layer.w2_weight_scale, num_warps
            )

            self.w13_precision_config = PrecisionConfig(
                weight_scale=w13_scale, flex_ctx=FlexCtx(rhs_data=w13_flex)
            )
            self.w2_precision_config = PrecisionConfig(
                weight_scale=w2_scale, flex_ctx=FlexCtx(rhs_data=w2_flex)
            )

            self.w13_weight = w13_weight
            self.w2_weight = w2_weight
            layer.w13_weight = Parameter(w13_weight.storage.data, requires_grad=False)
            layer.w2_weight = Parameter(w2_weight.storage.data, requires_grad=False)
        else:
            raise ValueError(f"Unsupported backend: {self.mxfp4_backend}")

    def get_fused_moe_quant_config(
        self, layer: torch.nn.Module
    ) -> FusedMoEQuantConfig | None:
        if self.mxfp4_backend == Mxfp4Backend.MARLIN:
            return mxfp4_w4a16_moe_quant_config(
                w1_bias=layer.w13_bias,
                w2_bias=layer.w2_bias,
                w1_scale=layer.w13_weight_scale,
                w2_scale=layer.w2_weight_scale,
            )
        elif self.mxfp4_backend == Mxfp4Backend.TRITON:
            w1_scale = self.w13_precision_config
            w2_scale = self.w2_precision_config
            return mxfp4_w4a16_moe_quant_config(
                w1_bias=layer.w13_bias,
                w2_bias=layer.w2_bias,
                w1_scale=w1_scale,
                w2_scale=w2_scale,
            )
        elif self.mxfp4_backend in [
            Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM,
            Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS,
        ]:
            return mxfp4_mxfp8_moe_quant_config(
                w1_bias=layer.w13_bias,
                w2_bias=layer.w2_bias,
                w1_scale=layer.w13_weight_scale,
                w2_scale=layer.w2_weight_scale,
            )
        elif self.mxfp4_backend in [Mxfp4Backend.SM100_FI_MXFP4_BF16]:
            return mxfp4_w4a16_moe_quant_config(
                w1_bias=layer.w13_bias,
                w2_bias=layer.w2_bias,
                w1_scale=layer.w13_weight_scale,
                w2_scale=layer.w2_weight_scale,
            )
        else:
            w1_scale = layer.w13_weight_scale
            w2_scale = layer.w2_weight_scale
            return ocp_mx_moe_quant_config(
                quant_dtype="mxfp4",
                w1_bias=layer.w13_bias,
                w2_bias=layer.w2_bias,
                w1_scale=w1_scale,
                w2_scale=w2_scale,
            )

    def select_gemm_impl(
        self,
        prepare_finalize: mk.FusedMoEPrepareAndFinalize,
        layer: torch.nn.Module,
    ) -> mk.FusedMoEPermuteExpertsUnpermute:
        if (
            prepare_finalize.activation_format
            == mk.FusedMoEActivationFormat.BatchedExperts
        ):
            if self.mxfp4_backend == Mxfp4Backend.MARLIN:
                max_num_tokens_per_rank = prepare_finalize.max_num_tokens_per_rank()
                assert max_num_tokens_per_rank is not None
                assert self.moe_quant_config is not None
                return BatchedMarlinExperts(
                    max_num_tokens=max_num_tokens_per_rank,
                    num_dispatchers=prepare_finalize.num_dispatchers(),
                    quant_config=self.moe_quant_config,
                )
            else:
                raise NotImplementedError(
                    f"Incompatible Mxfp4 backend ({self.mxfp4_backend}) for "
                    "EP batched experts format"
                )
        else:
            assert self.moe_quant_config is not None
            if (
                self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
                or self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16
            ):
                # B200 code-path
                kwargs = {
                    "gemm1_alpha": layer.gemm1_alpha,
                    "gemm1_beta": layer.gemm1_beta,
                    "gemm1_clamp_limit": layer.gemm1_clamp_limit,
                    # TODO(bnell): part of quant_config
                    "max_capture_size": self.max_capture_size,
                }
                return TrtLlmGenExperts(self.moe, self.moe_quant_config, **kwargs)
            elif self.mxfp4_backend == Mxfp4Backend.MARLIN:
                return MarlinExperts(self.moe_quant_config)
            elif self.mxfp4_backend == Mxfp4Backend.TRITON:
                return OAITritonExperts(self.moe_quant_config)
            else:
                raise NotImplementedError(
                    f"Incompatible Mxfp4 backend ({self.mxfp4_backend}) for EP"
                )

    @property
    def allow_inplace(self) -> bool:
        return True

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        router_logits: torch.Tensor,
        top_k: int,
        renormalize: bool,
        use_grouped_topk: bool = False,
        topk_group: int | None = None,
        num_expert_group: int | None = None,
        global_num_experts: int = -1,
        expert_map: torch.Tensor | None = None,
        custom_routing_function: Callable | None = None,
        scoring_func: str = "softmax",
        routed_scaling_factor: float = 1.0,
        e_score_correction_bias: torch.Tensor | None = None,
        apply_router_weight_on_input: bool = False,
        activation: str = "silu",
        enable_eplb: bool = False,
        expert_load_view: torch.Tensor | None = None,
        logical_to_physical_map: torch.Tensor | None = None,
        logical_replica_count: torch.Tensor | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        if enable_eplb:
            raise NotImplementedError("EPLB is not supported for mxfp4")

        if self.mxfp4_backend == Mxfp4Backend.MARLIN:
            topk_weights, topk_ids, _ = FusedMoE.select_experts(
                hidden_states=x,
                router_logits=router_logits,
                use_grouped_topk=use_grouped_topk,
                top_k=top_k,
                renormalize=renormalize,
                topk_group=topk_group,
                num_expert_group=num_expert_group,
                custom_routing_function=custom_routing_function,
                scoring_func=scoring_func,
                routed_scaling_factor=routed_scaling_factor,
                e_score_correction_bias=e_score_correction_bias,
            )

            return fused_marlin_moe(
                x,
                layer.w13_weight,
                layer.w2_weight,
                layer.w13_bias,
                layer.w2_bias,
                layer.w13_weight_scale,
                layer.w2_weight_scale,
                router_logits,
                topk_weights,
                topk_ids,
                global_scale1=None,
                global_scale2=None,
                quant_type_id=scalar_types.float4_e2m1f.id,
                apply_router_weight_on_input=apply_router_weight_on_input,
                global_num_experts=global_num_experts,
                activation=activation,
                expert_map=expert_map,
            )

        assert _can_support_mxfp4(
            use_grouped_topk,
            topk_group,
            num_expert_group,
            expert_map,
            custom_routing_function,
            e_score_correction_bias,
            apply_router_weight_on_input,
            scoring_func,
            activation,
            expert_load_view,
            logical_to_physical_map,
            logical_replica_count,
        ), "MXFP4 is not supported with this configuration."

        if (
            self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
            or self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16
        ):
            from flashinfer import trtllm_fp4_block_scale_moe

            if self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16:
                assert x.dtype == torch.bfloat16
                x_quant = x
                x_scale = None
            elif self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM:
                from flashinfer import mxfp8_quantize

                x_quant, x_scale = mxfp8_quantize(x, False)  # to mxfp8
                x_scale = x_scale.view(torch.float8_e4m3fn).reshape(*x.shape[:-1], -1)

            trtllm_gen_output = trtllm_fp4_block_scale_moe(
                router_logits.to(torch.bfloat16),
                None,  # routing_bias
                x_quant,
                x_scale,
                layer.w13_weight,  # uint8 (e2m1 x 2)
                layer.w13_weight_scale,  # uint8 (e4m3 x 2)
                layer.w13_bias,  # fp32 per expert per channel
                layer.gemm1_alpha,  # fp32 per expert
                layer.gemm1_beta,  # fp32 per expert
                layer.gemm1_clamp_limit,  # fp32 per expert
                layer.w2_weight,  # uint8 (e2m1 x 2)
                layer.w2_weight_scale,  # ue8m0
                layer.w2_bias,  # fp32 per expert per channel
                None,  # output1_scale_scalar
                None,  # output1_scale_gate_scalar
                None,  # output2_scale_scalar
                global_num_experts,
                top_k,
                None,  # n_group
                None,  # topk_group
                self.intermediate_size,  # padded to multiple of 256
                layer.ep_rank * layer.local_num_experts,  # local_expert_offset
                self.num_experts,  # local num experts
                None,
                None,
                1 if renormalize else 0,  # routing_method_type, renormalize
                True,  # do finalize
                tune_max_num_tokens=max(self.max_capture_size, 1),
            )[0]
            return trtllm_gen_output
        elif (
            self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS
            or self.mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16
        ):
            from vllm.utils.flashinfer import flashinfer_cutlass_fused_moe

            topk_weights, topk_ids, _ = FusedMoE.select_experts(
                hidden_states=x,
                router_logits=router_logits,
                use_grouped_topk=use_grouped_topk,
                top_k=top_k,
                renormalize=renormalize,
                topk_group=topk_group,
                num_expert_group=num_expert_group,
                custom_routing_function=custom_routing_function,
                scoring_func=scoring_func,
                e_score_correction_bias=e_score_correction_bias,
            )

            # Backend-specific preparation
            if self.mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS:
                from flashinfer import mxfp8_quantize

                x_quant, x_scale = mxfp8_quantize(x, True, 32)

                fake_input_scale = torch.ones(self.num_experts, device=x.device)
                quant_scales = [
                    layer.w13_weight_scale.contiguous().view(torch.int32),
                    fake_input_scale,
                    layer.w2_weight_scale.contiguous().view(torch.int32),
                    fake_input_scale,
                ]

                fi_input = x_quant
                extra_kwargs = dict(
                    use_mxfp8_act_scaling=True,
                    input_sf=x_scale,
                    fc1_expert_weights=layer.w13_weight.contiguous().view(torch.long),
                    fc2_expert_weights=layer.w2_weight.contiguous().view(torch.long),
                )
            elif self.mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16:
                assert x.dtype == torch.bfloat16

                quant_scales = [
                    layer.w13_weight_scale,
                    layer.w2_weight_scale,
                ]

                fi_input = x
                extra_kwargs = dict(
                    use_w4_group_scaling=True,
                    fc1_expert_weights=layer.w13_weight,
                    fc2_expert_weights=layer.w2_weight,
                )

            output = torch.empty_like(x, dtype=torch.bfloat16)
            _ = flashinfer_cutlass_fused_moe(
                input=fi_input,
                token_selected_experts=topk_ids.to(torch.int).contiguous(),
                token_final_scales=topk_weights,
                output_dtype=torch.bfloat16,
                output=output,
                quant_scales=quant_scales,
                fc1_expert_biases=layer.w13_bias,
                fc2_expert_biases=layer.w2_bias,
                swiglu_alpha=layer.gemm1_alpha,
                swiglu_beta=layer.gemm1_beta,
                swiglu_limit=layer.gemm1_clamp_limit,
                tp_size=self.moe.tp_size,
                tp_rank=self.moe.tp_rank,
                ep_size=self.moe.ep_size,
                ep_rank=self.moe.ep_rank,
                tune_max_num_tokens=max(self.max_capture_size, 1),
                **extra_kwargs,
            )

            return output
        elif self.mxfp4_backend == Mxfp4Backend.TRITON:
            from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import (  # noqa: E501
                triton_kernel_moe_forward,
            )

            return triton_kernel_moe_forward(
                hidden_states=x,
                w1=self.w13_weight,
                w2=self.w2_weight,
                gating_output=router_logits,
                topk=top_k,
                renormalize=renormalize,
                global_num_experts=global_num_experts,
                expert_map=expert_map,
                quant_config=self.moe_quant_config,
                apply_router_weight_on_input=apply_router_weight_on_input,
            )
        else:
            raise ValueError(f"Unsupported backend: {self.mxfp4_backend}")


class IpexMxfp4MoEMethod(Mxfp4MoEMethod):
    def __init__(self, moe_config: FusedMoEConfig):
        super().__init__(moe_config)
        self.moe_config = moe_config

    def create_weights(
        self,
        layer: torch.nn.Module,
        num_experts: int,
        hidden_size: int,
        intermediate_size_per_partition: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        super().create_weights(
            layer,
            num_experts,
            hidden_size,
            intermediate_size_per_partition,
            params_dtype,
            **extra_weight_attrs,
        )
        self.original_hidden_size = hidden_size

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        import intel_extension_for_pytorch as ipex

        layer.w13_weight.data = layer.w13_weight.data.view(torch.int32)
        layer.w2_weight.data = layer.w2_weight.data.view(torch.int32)
        ep_rank_start = self.moe_config.ep_rank * self.moe_config.num_local_experts
        layer.ipex_fusion = ipex.llm.modules.GatedMLPMOE(
            layer.w13_weight,
            layer.w2_weight,
            w1_scale_inv=layer.w13_weight_scale,
            w2_scale_inv=layer.w2_weight_scale,
            w13_bias=layer.w13_bias,
            w2_bias=layer.w2_bias,
            is_mxfp4=True,
            experts_start_id=ep_rank_start,
        )

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        router_logits: torch.Tensor,
        top_k: int,
        renormalize: bool,
        use_grouped_topk: bool = False,
        topk_group: int | None = None,
        num_expert_group: int | None = None,
        global_num_experts: int = -1,
        expert_map: torch.Tensor | None = None,
        custom_routing_function: Callable | None = None,
        scoring_func: str = "softmax",
        routed_scaling_factor: float = 1.0,
        e_score_correction_bias: torch.Tensor | None = None,
        apply_router_weight_on_input: bool = False,
        activation: str = "silu",
        enable_eplb: bool = False,
        expert_load_view: torch.Tensor | None = None,
        logical_to_physical_map: torch.Tensor | None = None,
        logical_replica_count: torch.Tensor | None = None,
    ) -> torch.Tensor:
        assert activation == "swigluoai", (
            "Only swiglu_oai activation is supported for IPEX MXFP4 MoE"
        )
        hidden_size_pad = round_up(self.original_hidden_size, 128)
        x_pad = torch.nn.functional.pad(x, (0, hidden_size_pad - x.size(-1)))
        hidden_states = layer.ipex_fusion(
            x_pad,
            use_grouped_topk,
            top_k,
            router_logits,
            renormalize,
            topk_group,
            num_expert_group,
            activation="swiglu_oai",
        )
        hidden_states = hidden_states[..., : self.original_hidden_size].contiguous()
        return hidden_states
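
A minimal, self-contained sketch of the pad-and-slice pattern used by IpexMxfp4MoEMethod.apply above, with hypothetical sizes; the round_up helper below is a stand-in for vllm.utils.math_utils.round_up (assumed ceil-to-multiple semantics) so the snippet runs without vLLM installed.

import torch
import torch.nn.functional as F


def round_up(x: int, multiple: int) -> int:
    # Assumed ceil-to-multiple semantics.
    return ((x + multiple - 1) // multiple) * multiple


# Hypothetical activation: 4 tokens, hidden size 2880.
x = torch.randn(4, 2880, dtype=torch.bfloat16)

# Pad the last dimension up to a multiple of 128, as the IPEX MXFP4 path does.
hidden_size_pad = round_up(x.size(-1), 128)           # 2944
x_pad = F.pad(x, (0, hidden_size_pad - x.size(-1)))   # shape (4, 2944)

# ... the fused MoE would consume x_pad here ...

# Slice the padding back off so downstream layers see the original width.
out = x_pad[..., : x.size(-1)].contiguous()           # shape (4, 2880)
assert out.shape == x.shape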