sglang 0.4.6.post4-py3-none-any.whl → 0.4.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_offline_throughput.py +16 -10
- sglang/bench_one_batch.py +5 -4
- sglang/bench_one_batch_server.py +86 -22
- sglang/bench_serving.py +197 -110
- sglang/compile_deep_gemm.py +4 -4
- sglang/lang/backend/runtime_endpoint.py +24 -1
- sglang/profiler.py +167 -0
- sglang/srt/_custom_ops.py +34 -0
- sglang/srt/configs/internvl.py +8 -12
- sglang/srt/configs/model_config.py +66 -29
- sglang/srt/constrained/base_grammar_backend.py +5 -2
- sglang/srt/constrained/llguidance_backend.py +9 -8
- sglang/srt/constrained/outlines_backend.py +5 -4
- sglang/srt/constrained/xgrammar_backend.py +18 -18
- sglang/srt/conversation.py +47 -9
- sglang/srt/custom_op.py +38 -3
- sglang/srt/debug_utils.py +74 -0
- sglang/srt/disaggregation/common/__init__.py +1 -0
- sglang/srt/disaggregation/common/conn.py +407 -0
- sglang/srt/disaggregation/decode.py +187 -134
- sglang/srt/disaggregation/decode_schedule_batch_mixin.py +142 -0
- sglang/srt/disaggregation/fake/conn.py +4 -13
- sglang/srt/disaggregation/kv_events.py +412 -0
- sglang/srt/disaggregation/launch_lb.py +140 -0
- sglang/srt/disaggregation/mini_lb.py +84 -70
- sglang/srt/disaggregation/mooncake/conn.py +441 -140
- sglang/srt/disaggregation/mooncake/transfer_engine.py +31 -14
- sglang/srt/disaggregation/nixl/conn.py +124 -442
- sglang/srt/disaggregation/prefill.py +128 -44
- sglang/srt/disaggregation/utils.py +154 -6
- sglang/srt/distributed/device_communicators/pymscclpp.py +315 -0
- sglang/srt/distributed/parallel_state.py +52 -5
- sglang/srt/distributed/utils.py +3 -3
- sglang/srt/entrypoints/EngineBase.py +11 -0
- sglang/srt/entrypoints/engine.py +129 -12
- sglang/srt/entrypoints/http_server.py +21 -6
- sglang/srt/entrypoints/http_server_engine.py +5 -2
- sglang/srt/function_call/base_format_detector.py +302 -0
- sglang/srt/function_call/core_types.py +34 -0
- sglang/srt/function_call/deepseekv3_detector.py +205 -0
- sglang/srt/function_call/ebnf_composer.py +248 -0
- sglang/srt/function_call/function_call_parser.py +202 -0
- sglang/srt/function_call/llama32_detector.py +93 -0
- sglang/srt/function_call/mistral_detector.py +131 -0
- sglang/srt/function_call/pythonic_detector.py +229 -0
- sglang/srt/function_call/qwen25_detector.py +121 -0
- sglang/srt/function_call/utils.py +52 -0
- sglang/srt/hf_transformers_utils.py +50 -7
- sglang/srt/layers/attention/aiter_backend.py +878 -0
- sglang/srt/layers/attention/base_attn_backend.py +4 -0
- sglang/srt/layers/attention/cutlass_mla_backend.py +2 -19
- sglang/srt/layers/attention/flashattention_backend.py +166 -35
- sglang/srt/layers/attention/flashinfer_backend.py +45 -1
- sglang/srt/layers/attention/flashinfer_mla_backend.py +45 -5
- sglang/srt/layers/attention/flashmla_backend.py +340 -78
- sglang/srt/layers/attention/intel_amx_backend.py +128 -0
- sglang/srt/layers/attention/tbo_backend.py +232 -0
- sglang/srt/layers/attention/torch_native_backend.py +3 -0
- sglang/srt/layers/attention/triton_backend.py +247 -5
- sglang/srt/layers/attention/triton_ops/extend_attention.py +12 -4
- sglang/srt/layers/attention/utils.py +2 -2
- sglang/srt/layers/attention/vision.py +1 -1
- sglang/srt/layers/communicator.py +517 -0
- sglang/srt/layers/dp_attention.py +6 -15
- sglang/srt/layers/layernorm.py +30 -19
- sglang/srt/layers/moe/cutlass_moe.py +370 -0
- sglang/srt/layers/moe/cutlass_moe_params.py +169 -0
- sglang/srt/layers/moe/ep_moe/kernels.py +60 -17
- sglang/srt/layers/moe/ep_moe/layer.py +195 -87
- sglang/srt/layers/moe/ep_moe/token_dispatcher.py +88 -8
- sglang/srt/layers/moe/fused_moe_native.py +4 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +220 -25
- sglang/srt/layers/moe/fused_moe_triton/layer.py +48 -4
- sglang/srt/layers/moe/topk.py +107 -24
- sglang/srt/layers/multimodal.py +70 -0
- sglang/srt/layers/quantization/__init__.py +10 -4
- sglang/srt/layers/quantization/blockwise_int8.py +3 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +5 -0
- sglang/srt/layers/quantization/deep_gemm.py +60 -59
- sglang/srt/layers/quantization/fp8.py +113 -18
- sglang/srt/layers/quantization/fp8_kernel.py +118 -66
- sglang/srt/layers/quantization/fp8_utils.py +165 -43
- sglang/srt/layers/quantization/gptq.py +298 -6
- sglang/srt/layers/quantization/int8_kernel.py +18 -5
- sglang/srt/layers/quantization/modelopt_quant.py +334 -7
- sglang/srt/layers/quantization/moe_wna16.py +3 -0
- sglang/srt/layers/quantization/qoq.py +244 -0
- sglang/srt/layers/quantization/w8a8_fp8.py +3 -0
- sglang/srt/layers/quantization/w8a8_int8.py +3 -0
- sglang/srt/layers/rotary_embedding.py +6 -12
- sglang/srt/layers/sampler.py +80 -79
- sglang/srt/layers/utils.py +6 -0
- sglang/srt/lora/layers.py +12 -15
- sglang/srt/lora/lora.py +49 -5
- sglang/srt/lora/lora_manager.py +20 -8
- sglang/srt/lora/mem_pool.py +24 -16
- sglang/srt/lora/utils.py +17 -13
- sglang/srt/managers/data_parallel_controller.py +13 -5
- sglang/srt/managers/eplb_algorithms/__init__.py +63 -0
- sglang/srt/managers/eplb_algorithms/deepseek.py +223 -0
- sglang/srt/managers/eplb_algorithms/deepseek_vec.py +276 -0
- sglang/srt/managers/eplb_manager.py +96 -0
- sglang/srt/managers/expert_distribution.py +878 -56
- sglang/srt/managers/expert_location.py +448 -0
- sglang/srt/managers/expert_location_dispatch.py +108 -0
- sglang/srt/managers/io_struct.py +29 -5
- sglang/srt/managers/mm_utils.py +355 -151
- sglang/srt/managers/multimodal_processors/base_processor.py +299 -42
- sglang/srt/managers/multimodal_processors/deepseek_vl_v2.py +6 -1
- sglang/srt/managers/multimodal_processors/gemma3.py +15 -17
- sglang/srt/managers/multimodal_processors/internvl.py +18 -5
- sglang/srt/managers/multimodal_processors/janus_pro.py +7 -1
- sglang/srt/managers/multimodal_processors/kimi_vl.py +14 -32
- sglang/srt/managers/multimodal_processors/llava.py +3 -3
- sglang/srt/managers/multimodal_processors/minicpm.py +27 -32
- sglang/srt/managers/multimodal_processors/mllama4.py +6 -0
- sglang/srt/managers/multimodal_processors/phi4mm.py +87 -0
- sglang/srt/managers/multimodal_processors/pixtral.py +9 -9
- sglang/srt/managers/multimodal_processors/qwen_vl.py +35 -35
- sglang/srt/managers/schedule_batch.py +185 -55
- sglang/srt/managers/schedule_policy.py +4 -5
- sglang/srt/managers/scheduler.py +389 -154
- sglang/srt/managers/session_controller.py +1 -1
- sglang/srt/managers/tokenizer_manager.py +231 -39
- sglang/srt/managers/utils.py +0 -4
- sglang/srt/mem_cache/base_prefix_cache.py +3 -0
- sglang/srt/mem_cache/chunk_cache.py +3 -1
- sglang/srt/mem_cache/hiradix_cache.py +4 -4
- sglang/srt/mem_cache/memory_pool.py +74 -52
- sglang/srt/mem_cache/multimodal_cache.py +45 -0
- sglang/srt/mem_cache/radix_cache.py +58 -5
- sglang/srt/metrics/collector.py +11 -2
- sglang/srt/mm_utils.py +10 -0
- sglang/srt/model_executor/cuda_graph_runner.py +87 -65
- sglang/srt/model_executor/expert_location_updater.py +557 -0
- sglang/srt/model_executor/forward_batch_info.py +39 -14
- sglang/srt/model_executor/model_runner.py +231 -101
- sglang/srt/model_loader/loader.py +10 -6
- sglang/srt/model_loader/utils.py +67 -1
- sglang/srt/models/clip.py +5 -1
- sglang/srt/models/deepseek_nextn.py +1 -1
- sglang/srt/models/deepseek_v2.py +732 -403
- sglang/srt/models/exaone.py +8 -3
- sglang/srt/models/gemma3_causal.py +7 -0
- sglang/srt/models/gemma3_mm.py +75 -33
- sglang/srt/models/idefics2.py +342 -0
- sglang/srt/models/kimi_vl.py +4 -4
- sglang/srt/models/llama.py +1 -1
- sglang/srt/models/llama4.py +10 -2
- sglang/srt/models/llava.py +26 -18
- sglang/srt/models/mimo_mtp.py +220 -0
- sglang/srt/models/minicpmo.py +7 -17
- sglang/srt/models/minicpmv.py +3 -295
- sglang/srt/models/mistral.py +71 -1
- sglang/srt/models/mllama.py +3 -3
- sglang/srt/models/phi4mm.py +512 -0
- sglang/srt/models/qwen2.py +133 -35
- sglang/srt/models/qwen2_5_vl.py +5 -3
- sglang/srt/models/qwen2_eagle.py +4 -1
- sglang/srt/models/qwen2_moe.py +206 -69
- sglang/srt/models/qwen2_vl.py +3 -3
- sglang/srt/models/qwen3.py +92 -19
- sglang/srt/models/qwen3_moe.py +457 -55
- sglang/srt/models/registry.py +9 -1
- sglang/srt/models/siglip.py +294 -0
- sglang/srt/models/transformers.py +291 -0
- sglang/srt/openai_api/adapter.py +114 -40
- sglang/srt/openai_api/protocol.py +37 -2
- sglang/srt/openai_api/utils.py +172 -0
- sglang/srt/operations.py +189 -0
- sglang/srt/operations_strategy.py +207 -0
- sglang/srt/sampling/sampling_batch_info.py +13 -1
- sglang/srt/sampling/sampling_params.py +2 -1
- sglang/srt/server_args.py +235 -38
- sglang/srt/speculative/build_eagle_tree.py +8 -8
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +8 -11
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +253 -0
- sglang/srt/speculative/eagle_utils.py +181 -90
- sglang/srt/speculative/eagle_worker.py +146 -21
- sglang/srt/two_batch_overlap.py +635 -0
- sglang/srt/utils.py +197 -19
- sglang/test/runners.py +16 -7
- sglang/test/send_one.py +4 -0
- sglang/test/test_cutlass_moe.py +278 -0
- sglang/test/test_fp4_moe.py +248 -0
- sglang/test/test_utils.py +81 -42
- sglang/utils.py +2 -2
- sglang/version.py +1 -1
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/METADATA +31 -19
- sglang-0.4.7.dist-info/RECORD +699 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/WHEEL +1 -1
- sglang/srt/function_call_parser.py +0 -858
- sglang/srt/platforms/interface.py +0 -371
- sglang-0.4.6.post4.dist-info/RECORD +0 -646
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H200.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_L40S.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=96,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/models/{xiaomi_mimo.py → mimo.py} +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/top_level.txt +0 -0
sglang/srt/layers/moe/cutlass_moe.py
@@ -0,0 +1,370 @@
+"""CUTLASS based Fused MoE kernels."""
+
+import functools
+import json
+import logging
+import os
+from typing import Any, Callable, Dict, List, Optional, Tuple
+
+import torch
+
+from sglang.srt.layers.moe.cutlass_moe_params import CutlassMoEParams
+from sglang.srt.utils import is_cuda
+
+_is_cuda = is_cuda()
+if _is_cuda:
+    import sgl_kernel
+    from sgl_kernel import (
+        apply_shuffle_mul_sum,
+        cutlass_fp4_group_mm,
+        fp8_blockwise_scaled_grouped_mm,
+        prepare_moe_input,
+        scaled_fp4_experts_quant,
+        shuffle_rows,
+        silu_and_mul,
+    )
+
+
+def cutlass_fused_experts_fp8(
+    a: torch.Tensor,
+    w1_q: torch.Tensor,
+    w2_q: torch.Tensor,
+    w1_scale: torch.Tensor,
+    w2_scale: torch.Tensor,
+    topk_weights: torch.Tensor,
+    topk_ids: torch.Tensor,
+    a1_strides: torch.Tensor,
+    c1_strides: torch.Tensor,
+    a2_strides: torch.Tensor,
+    c2_strides: torch.Tensor,
+    workspace: torch.Tensor,
+    a_ptrs: torch.Tensor,
+    b_ptrs: torch.Tensor,
+    out_ptrs: torch.Tensor,
+    a_scales_ptrs: torch.Tensor,
+    b_scales_ptrs: torch.Tensor,
+    expert_offsets: torch.Tensor,
+    problem_sizes1: torch.Tensor,
+    problem_sizes2: torch.Tensor,
+    use_fp8_blockscale: bool = True,
+) -> torch.Tensor:
+    """Performs Fused MoE computation using CUTLASS-like kernels with FP8 weights and activations.
+
+    This function implements a Mixture of Experts (MoE) layer with a SwiGLU/SiLU
+    activation, leveraging custom kernels likely derived from CUTLASS principles
+    for grouped matrix multiplication (`fp8_blockwise_scaled_grouped_mm`) and
+    data preparation (`prepare_moe_input`, `silu_and_mul`).
+
+    It handles per-token routing, quantizes input activations to FP8 with
+    per-token scales, performs the expert computations using FP8 GEMMs with
+    pre-quantized FP8 weights (per-block scales), applies the SiLU activation,
+    and combines the results weighted by the router scores.
+
+    Args:
+        a (torch.Tensor): Input activations. Shape: `(m, k)`, where `m` is the total
+            number of tokens and `k` is the hidden size. Expected dtype: `torch.half`
+            or `torch.bfloat16`.
+        w1_q (torch.Tensor): Pre-quantized FP8 weight tensor for the first GEMM
+            (up-projection part of SwiGLU). Expected shape: `(E, k, n*2)`, where
+            `E` is the number of experts, `k` is the hidden size, and `n*2` is the
+            intermediate size (`I`). Expected dtype: `torch.float8_e4m3fn`.
+            Note: This shape implies weights are stored as (num_experts, hidden_size, intermediate_size).
+        w2_q (torch.Tensor): Pre-quantized FP8 weight tensor for the second GEMM
+            (down-projection). Expected shape: `(E, n, k)`, where `n` is half the
+            intermediate size (`I // 2`). Expected dtype: `torch.float8_e4m3fn`.
+            Note: This shape implies weights are stored as (num_experts, intermediate_size // 2, hidden_size).
+        w1_scale (torch.Tensor): Scales corresponding to `w1_q` (per-block scales).
+            Shape: `(E, num_blocks_n, num_blocks_k)`. Dtype: `torch.float32`.
+        w2_scale (torch.Tensor): Scales corresponding to `w2_q` (per-block scales).
+            Shape: `(E, num_blocks_k, num_blocks_n)`. Dtype: `torch.float32`.
+        topk_weights (torch.Tensor): Router weights for the selected top-k experts
+            for each token. Shape: `(m, topk)`. Dtype should ideally match `a`.
+        topk_ids (torch.Tensor): Indices of the selected top-k experts for each token.
+            Shape: `(m, topk)`. Dtype: `torch.int32`.
+        a1_strides (torch.Tensor): Stride information for the first GEMM's 'a' input.
+            Passed directly to the underlying kernel. Expected shape `(E,)`, dtype `torch.int64`.
+            Note: Its exact usage within `fp8_blockwise_scaled_grouped_mm` needs clarification
+            as it's passed as both a_stride and b_stride in the first call.
+        c1_strides (torch.Tensor): Stride information for the first GEMM's 'c' output.
+            Passed directly to the underlying kernel. Expected shape `(E,)`, dtype `torch.int64`.
+        a2_strides (torch.Tensor): Stride information for the second GEMM's 'a' input.
+            Passed directly to the underlying kernel. Expected shape `(E,)`, dtype `torch.int64`.
+            Note: Its exact usage within `fp8_blockwise_scaled_grouped_mm` needs clarification
+            as it's passed as both a_stride and b_stride in the second call.
+        c2_strides (torch.Tensor): Stride information for the second GEMM's 'c' output.
+            Passed directly to the underlying kernel. Expected shape `(E,)`, dtype `torch.int64`.
+        workspace (torch.Tensor): Reusable workspace for the underlying kernel.
+        a_ptrs (torch.Tensor): Pointers container for calculating offsets of the input activations for each expert.
+        b_ptrs (torch.Tensor): Pointers container for calculating offsets of the input weights for each expert.
+        out_ptrs (torch.Tensor): Pointers container for calculating offsets of the output activations for each expert.
+        a_scales_ptrs (torch.Tensor): Pointers container for calculating offsets of the input scales for each expert.
+        b_scales_ptrs (torch.Tensor): Pointers container for calculating offsets of the input scales for each expert.
+        use_fp8_blockscale (bool, optional): Flag indicating usage of FP8 with
+            block scaling. Currently, only `True` is supported. Defaults to `True`.
+
+    Returns:
+        torch.Tensor: The computed MoE layer output. Shape: `(m, k)`, dtype matches `a`.
+
+    Raises:
+        AssertionError: If input shapes, dtypes, or flags are inconsistent or unsupported.
+        NotImplementedError: If CUDA is not available or `sgl_kernel` is not properly installed.
+    """
+    assert use_fp8_blockscale, "Only support fp8 blockscale for now"
+    assert topk_weights.shape == topk_ids.shape, "topk shape mismatch"
+    assert w1_q.dtype == torch.float8_e4m3fn
+    assert w2_q.dtype == torch.float8_e4m3fn
+    assert a.shape[1] == w1_q.shape[1], "Hidden size mismatch w1"
+    assert w1_q.shape[2] == w2_q.shape[1] * 2, "Hidden size mismatch w2"
+    assert w1_q.shape[0] == w2_q.shape[0], "Expert number mismatch"
+    assert w1_q.shape[0] == w2_q.shape[0], "Weights expert number mismatch"
+    assert w1_q.shape[0] == w1_scale.shape[0], "w1 scales expert number mismatch"
+    assert w1_q.shape[0] == w2_scale.shape[0], "w2 scales expert number mismatch"
+    assert a.dtype in [torch.half, torch.bfloat16], "Invalid output dtype"
+
+    if _is_cuda:
+        from sglang.srt.layers.quantization.fp8_kernel import (
+            sglang_per_token_group_quant_fp8,
+        )
+
+    out_dtype = a.dtype
+    num_experts = w1_q.size(0)
+    m = a.size(0)
+    k = w1_q.size(1)
+    n = w2_q.size(1)
+
+    topk = topk_ids.size(1)
+
+    a_q, a1_scale = sglang_per_token_group_quant_fp8(a, 128)
+    device = a_q.device
+
+    a_map = torch.empty((topk_ids.numel()), dtype=torch.int32, device=device)
+    c_map = torch.empty((topk_ids.numel()), dtype=torch.int32, device=device)
+
+    prepare_moe_input(
+        topk_ids,
+        expert_offsets,
+        problem_sizes1,
+        problem_sizes2,
+        a_map,
+        c_map,
+        num_experts,
+        n,
+        k,
+    )
+
+    rep_a_q = shuffle_rows(a_q, a_map, (m * topk, k))
+    rep_a1_scales = shuffle_rows(a1_scale, a_map, (m * topk, int(k / 128)))
+
+    c1 = torch.empty((m * topk, n * 2), device=device, dtype=out_dtype)
+    c2 = torch.empty((m * topk, k), device=device, dtype=out_dtype)
+
+    a_sf_layout = torch.empty((num_experts, 5), device=device, dtype=torch.int)
+    w_sf_layout = torch.empty((num_experts, 5), device=device, dtype=torch.int)
+
+    fp8_blockwise_scaled_grouped_mm(
+        c1,
+        a_ptrs,
+        b_ptrs,
+        out_ptrs,
+        a_scales_ptrs,
+        b_scales_ptrs,
+        rep_a_q,
+        w1_q,
+        rep_a1_scales,
+        w1_scale,
+        a1_strides,
+        a1_strides,
+        c1_strides,
+        a_sf_layout,
+        w_sf_layout,
+        problem_sizes1,
+        expert_offsets[:-1],
+        workspace,
+    )
+
+    intermediate = torch.empty((m * topk, n), device=device, dtype=out_dtype)
+    silu_and_mul(c1, intermediate)
+
+    intermediate_q, a2_scale = sglang_per_token_group_quant_fp8(intermediate, 128)
+
+    fp8_blockwise_scaled_grouped_mm(
+        c2,
+        a_ptrs,
+        b_ptrs,
+        out_ptrs,
+        a_scales_ptrs,
+        b_scales_ptrs,
+        intermediate_q,
+        w2_q,
+        a2_scale,
+        w2_scale,
+        a2_strides,
+        a2_strides,
+        c2_strides,
+        a_sf_layout,
+        w_sf_layout,
+        problem_sizes2,
+        expert_offsets[:-1],
+        workspace,
+    )
+
+    result = torch.empty((m, k), device=device, dtype=out_dtype)
+    return apply_shuffle_mul_sum(c2, result, c_map, topk_weights)
+
+
+FLOAT4_E2M1_MAX = 6.0
+FLOAT8_E4M3_MAX = 448.0
+
+
+def cutlass_moe_fp4(
+    a: torch.Tensor,
+    a1_gscale: torch.Tensor,
+    w1_fp4: torch.Tensor,
+    w1_blockscale: torch.Tensor,
+    w1_alphas: torch.Tensor,
+    a2_gscale: torch.Tensor,
+    w2_fp4: torch.Tensor,
+    w2_blockscale: torch.Tensor,
+    w2_alphas: torch.Tensor,
+    topk_weights: torch.Tensor,
+    topk_ids: torch.Tensor,
+    params: CutlassMoEParams,
+    apply_router_weight_on_input: bool = False,
+):
+    """
+    MoE implementation for FP4 inputs.
+
+    # Gemm 1
+    a: Input tensor: [m, k] (half/bfloat16)
+    a1_gscale: Activation scale per expert: [e] (float32)
+    w1 (gate up) (not an argument to cutlass_moe_fp4): [e, 2 * n, k]
+    w1_fp4: [e, 2 * n, k // 2], dtype: torch.uint8 (stacked fp4: E2M1)
+        (Note: `n` is the up-projection output dim, `k` is the input dim in
+        full precision)
+    w1_blockscale: [e, 2 * n, k // block_size] (float8_e4m3)
+        (Block size = 16 for NVFP4)
+
+    # Gemm 2
+    a2_gscale: Activation scale per expert: [e]
+    w2 (down projection) (not an argument to cutlass_moe_fp4): [e, k, n]
+    w2_fp4: [e, k, n // 2], dtype: torch.uint8 (stacked E2M1)
+    w2_blockscale: [e, k, n // block_size], dtype: float8_e4m3
+
+    Strides for activations, weights and output in logical number of elements.
+    The activations & output stride is the number of elements to the next row.
+    The weights stride is the number of elements to the next row per expert.
+    For example, if the weight is [e, n, k], then the b_stride is a tensor of
+    shape [e] with each element being k. Similarly for activations, if the
+    shape is [m, k], then the a_stride has shape [e] with each value k.
+    Similarly for output, if the output is [m, n], then the c_stride is a
+    tensor of shape [e] with each element being k.
+
+    Note: cutlass_fp4_group_mm is designed to accept the strides of
+    activations and weights to be the same, so it is passed in as a single
+    tensor.
+    ab_strides_13: [e] dtype: int64 [Gemm 1: Activation / Weight strides]
+    ab_strides_2: [e] dtype: int64 [Gemm 2: Activation / Weight strides]
+    c_strides_13: [e] dtype: int64 [Gemm 1: Output strides]
+    c_strides_2: [e] dtype: int64 [Gemm 2: Output strides]
+
+    topk_weights: [m, topk] dtype: float8
+    topk_ids: [m, topk] dtype: float8
+
+    m, n, k: Unquantized weight shapes, dtype: int
+    e: number of experts for the current rank, dtype: int
+    Assumes that topk < k < n to satisfy the up/down projection expectations.
+    """
+    assert topk_weights.shape == topk_ids.shape, "topk shape mismatch"
+    assert w1_fp4.dtype == torch.uint8, "weight 1 must be uint8"
+    assert w2_fp4.dtype == torch.uint8, "weight 2 must be uint8"
+    assert (
+        w1_fp4.ndim == 3
+        and w2_fp4.ndim == 3
+        and w1_blockscale.ndim == 3
+        and w2_blockscale.ndim == 3
+    ), "All Weights must be of rank 3 for cutlass_moe_fp4"
+    m_a, k_a = a.shape
+    e_w1, nx2_w1, half_k_w1 = w1_fp4.shape
+    e_w2, k_w2, half_n_w2 = w2_fp4.shape
+
+    assert e_w1 == e_w2 and e_w1 == params.num_experts, (
+        "Number of experts must match",
+        " between weights.",
+    )
+    assert (
+        k_a // 2 == half_k_w1 and params.hidden_size == k_w2
+    ), "Hidden size mismatch between a, w1 and w2"
+    assert (
+        nx2_w1 == params.intermediate_size_per_partition * 2
+        and half_n_w2 == params.intermediate_size_per_partition // 2
+    ), ("mismatch in " "expected `n`")
+    assert 2 * half_k_w1 == k_w2, "Hidden size mismatch w2 and w1"
+    assert a.dtype in [torch.half, torch.bfloat16], "Invalid input dtype"
+
+    out_dtype = a.dtype
+    num_topk = topk_ids.shape[1]
+    device = a.device
+    a_map = torch.empty((topk_ids.numel()), dtype=torch.int32, device=device)
+    c_map = torch.empty((topk_ids.numel()), dtype=torch.int32, device=device)
+    prepare_moe_input(
+        topk_ids,
+        params.expert_offsets,
+        params.problem_sizes1,
+        params.problem_sizes2,
+        a_map,
+        c_map,
+        params.num_experts,
+        params.intermediate_size_per_partition,
+        params.hidden_size,
+        params.blockscale_offsets,
+    )
+
+    rep_a_fp4, rep_a_blockscale = scaled_fp4_experts_quant(
+        a,
+        a1_gscale,
+        params.expert_offsets,
+        params.blockscale_offsets,
+        num_topk,
+        expert_map=a_map,
+    )
+    c1 = cutlass_fp4_group_mm(
+        rep_a_fp4,
+        w1_fp4,
+        rep_a_blockscale,
+        w1_blockscale,
+        w1_alphas,
+        out_dtype,
+        device,
+        params.to_gemm1_args(),
+    )
+    del rep_a_fp4, rep_a_blockscale
+
+    # The hidden size dimension is split into one half-sized tensor.
+    intermediate = torch.empty(
+        (m_a * num_topk, w1_fp4.shape[1] // 2), device=device, dtype=out_dtype
+    )
+    silu_and_mul(c1, intermediate)
+
+    int_fp4, int_blockscale = scaled_fp4_experts_quant(
+        intermediate,
+        a2_gscale,
+        params.expert_offsets,
+        params.blockscale_offsets,
+        num_topk,
+    )
+    c2 = cutlass_fp4_group_mm(
+        int_fp4,
+        w2_fp4,
+        int_blockscale,
+        w2_blockscale,
+        w2_alphas,
+        out_dtype,
+        device,
+        params.to_gemm2_args(),
+    )
+    del int_fp4, int_blockscale
+    c2 = shuffle_rows(c2, c_map, (m_a * num_topk, params.hidden_size))
+    c2 = c2.view(m_a, num_topk, params.hidden_size)
+    if not apply_router_weight_on_input:
+        c2 = c2 * topk_weights.view(m_a, num_topk, 1).to(out_dtype)
+    return c2.sum(dim=1).to(out_dtype)
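For orientation, the routing-and-combine math that `cutlass_fused_experts_fp8` fuses into two grouped FP8 GEMMs can be written as a plain, unquantized PyTorch reference. This sketch is not part of the package: it follows the weight shapes from the docstring above, skips the FP8 quantization and shuffling steps, and assumes the `silu_and_mul` convention of gating on the first half of the fused gate/up projection.

import torch
import torch.nn.functional as F


def reference_moe_swiglu(a, w1, w2, topk_weights, topk_ids):
    """Unfused reference: top-k routing, per-expert SwiGLU, weighted combine.

    a:            (m, k) activations
    w1:           (E, k, 2n) gate/up weights per expert (unquantized)
    w2:           (E, n, k) down-projection weights per expert (unquantized)
    topk_weights: (m, topk) router weights
    topk_ids:     (m, topk) selected expert indices
    """
    m, _ = a.shape
    out = torch.zeros_like(a)
    for t in range(m):
        for j in range(topk_ids.shape[1]):
            e = int(topk_ids[t, j])
            gate_up = a[t] @ w1[e]                      # (2n,)
            gate, up = gate_up.chunk(2)                 # gate on the first half
            h = F.silu(gate) * up                       # (n,)
            out[t] += topk_weights[t, j] * (h @ w2[e])  # (k,)
    return out


# Tiny smoke test with random weights and routing.
m, k, n, E, topk = 4, 8, 6, 3, 2
a = torch.randn(m, k)
w1 = torch.randn(E, k, 2 * n)
w2 = torch.randn(E, n, k)
topk_weights = torch.rand(m, topk)
topk_ids = torch.randint(0, E, (m, topk), dtype=torch.int32)
print(reference_moe_swiglu(a, w1, w2, topk_weights, topk_ids).shape)  # torch.Size([4, 8])

The fused path computes the same result per token, but first groups the token rows by expert (`prepare_moe_input` + `shuffle_rows`) so each expert's tokens form one contiguous GEMM problem, then un-shuffles and applies the router weights in `apply_shuffle_mul_sum`.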
sglang/srt/layers/moe/cutlass_moe_params.py
@@ -0,0 +1,169 @@
+from dataclasses import dataclass
+from enum import Enum, auto
+from typing import Optional
+
+import torch
+
+
+class CutlassMoEType(Enum):
+    """
+    Enum for the different types of cutlass moe operations
+    that are currently supported in SGLang.
+    """
+
+    BlockscaledFP8 = auto()
+    BlockscaledFP4 = auto()
+
+
+@dataclass
+class CutlassMoEParams:
+    """
+    Parameters for the cutlass moe operation.
+    """
+
+    # Type as defined above
+    cutlass_moe_type: CutlassMoEType
+
+    # Strides for activations, weights and output in logical number of elements.
+    # The activations & output stride is the number of elements to the next row.
+    # The weights stride is the number of elements to the next row per expert.
+    # For example, if the weight is [e, n, k], then the b_stride is a tensor of
+    # shape [e] with each element being k. Similarly for activations, if the
+    # shape is [m, k], then the a_stride has shape [e] with each value k.
+    # Similarly for output, if the output is [m, n], then the c_stride is a
+    # tensor of shape [e] with each element being k.
+
+    # Note: cutlass_fp4_group_mm is designed to accept the strides of
+    # activations and weights to be the same, so it is passed in as a single
+    # tensor.
+    # ab_strides_13: [e] dtype: int64 [Gemm 1: Activation / Weight strides]
+    # ab_strides_2: [e] dtype: int64 [Gemm 2: Activation / Weight strides]
+    # c_strides_13: [e] dtype: int64 [Gemm 1: Output Strides]
+    # c_strides_2: [e] dtype: int64 [Gemm 2: Output Strides]
+    ab_strides_13: torch.Tensor
+    ab_strides_2: torch.Tensor
+    c_strides_13: torch.Tensor
+    c_strides_2: torch.Tensor
+
+    # m: Total number of tokens
+    # n: intermediate size per partition
+    # k: hidden size per expert
+    # e: Number of experts
+    # device: Device to run computation on and store tensors
+    m: int
+    intermediate_size_per_partition: int
+    hidden_size: int
+    num_experts: int
+    device: torch.device
+
+    # Pointers container for calculating offsets of the input activations for each expert
+    # a_ptrs: [e] dtype: int64
+    a_ptrs: torch.Tensor
+
+    # Pointers container for calculating offsets of the input weights for each expert
+    # b_ptrs: [e] dtype: int64
+    b_ptrs: torch.Tensor
+
+    # Pointers container for calculating offsets of the output activations for each expert
+    # out_ptrs: [e] dtype: int64
+    out_ptrs: torch.Tensor
+    # Pointers container for calculating offsets of the input scales for each expert
+    # a_scales_ptrs: [e] dtype: int64
+    # b_scales_ptrs: [e] dtype: int64
+    a_scales_ptrs: torch.Tensor
+    b_scales_ptrs: torch.Tensor
+
+    # Offsets that mark at which token index each expert begins its computation
+    # The number of tokens computed with expert E is expert_offsets[E + 1] - expert_offsets[E]
+    # expert_offsets: [e+1] dtype: int32
+    expert_offsets: torch.Tensor
+
+    # Problem size: (num_experts, (m,2n,k)) for first GEMM
+    # problem_sizes1: [e, 3] dtype: int32
+    # Problem size: (num_experts, (m,n,k)) for second GEMM
+    # problem_sizes2: [e, 3] dtype: int32
+    problem_sizes1: torch.Tensor
+    problem_sizes2: torch.Tensor
+    # Similar to expert_offsets, but for blockscales for FP4 blockscaled Group GEMM
+    blockscale_offsets: Optional[torch.Tensor] = None
+
+    def __init__(
+        self,
+        cutlass_moe_type: CutlassMoEType,
+        device: torch.device,
+        num_experts: int,
+        intermediate_size_per_partition: int,
+        hidden_size: int,
+    ):
+        self.cutlass_moe_type = cutlass_moe_type
+        self.device = device
+        self.num_experts = num_experts
+        self.intermediate_size_per_partition = intermediate_size_per_partition
+        self.hidden_size = hidden_size
+        self.n = self.intermediate_size_per_partition
+        self.k = self.hidden_size
+        self.e = self.num_experts
+        self.ab_strides_13 = torch.full(
+            (self.e,), self.k, dtype=torch.int64, device=self.device
+        )
+        self.ab_strides_2 = torch.full(
+            (self.e,), self.n, dtype=torch.int64, device=self.device
+        )
+        self.c_strides_13 = torch.full(
+            (self.e,), 2 * self.n, dtype=torch.int64, device=self.device
+        )
+        self.c_strides_2 = torch.full(
+            (self.e,), self.k, dtype=torch.int64, device=self.device
+        )
+        self.expert_offsets = torch.empty(
+            (self.e + 1,), dtype=torch.int32, device=self.device
+        )
+        self.problem_sizes1 = torch.empty(
+            (self.e, 3), dtype=torch.int32, device=self.device
+        )
+        self.problem_sizes2 = torch.empty(
+            (self.e, 3), dtype=torch.int32, device=self.device
+        )
+        if self.cutlass_moe_type == CutlassMoEType.BlockscaledFP4:
+            self.blockscale_offsets = torch.empty(
+                (self.e + 1,), dtype=torch.int32, device=self.device
+            )
+        else:
+            self.blockscale_offsets = None
+        self.a_ptrs = torch.empty((self.e,), dtype=torch.int64, device=self.device)
+        self.b_ptrs = torch.empty((self.e,), dtype=torch.int64, device=self.device)
+        self.out_ptrs = torch.empty((self.e,), dtype=torch.int64, device=self.device)
+        self.a_scales_ptrs = torch.empty(
+            (self.e,), dtype=torch.int64, device=self.device
+        )
+        self.b_scales_ptrs = torch.empty(
+            (self.e,), dtype=torch.int64, device=self.device
+        )
+
+    def to_gemm1_args(self) -> dict:
+        return {
+            "ab_strides": self.ab_strides_13,
+            "c_strides": self.c_strides_13,
+            "problem_sizes": self.problem_sizes1,
+            "expert_offsets": self.expert_offsets[:-1],
+            "blockscale_offsets": self.blockscale_offsets[:-1],
+            # "a_ptrs": self.a_ptrs,
+            # "b_ptrs": self.b_ptrs,
+            # "out_ptrs": self.out_ptrs,
+            # "a_scales_ptrs": self.a_scales_ptrs,
+            # "b_scales_ptrs": self.b_scales_ptrs,
+        }
+
+    def to_gemm2_args(self) -> dict:
+        return {
+            "ab_strides": self.ab_strides_2,
+            "c_strides": self.c_strides_2,
+            "problem_sizes": self.problem_sizes2,
+            "expert_offsets": self.expert_offsets[:-1],
+            "blockscale_offsets": self.blockscale_offsets[:-1],
+            # "a_ptrs": self.a_ptrs,
+            # "b_ptrs": self.b_ptrs,
+            # "out_ptrs": self.out_ptrs,
+            # "a_scales_ptrs": self.a_scales_ptrs,
+            # "b_scales_ptrs": self.b_scales_ptrs,
+        }
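A short usage sketch for the parameter container above (illustrative only, with made-up sizes; the real callers are the FP4/FP8 MoE layers and run on a CUDA device, whereas this allocation-only example also works on CPU). `CutlassMoEParams` pre-allocates all stride, offset, and problem-size buffers once, and `to_gemm1_args()` / `to_gemm2_args()` hand the right subset to each grouped GEMM.

import torch

from sglang.srt.layers.moe.cutlass_moe_params import CutlassMoEParams, CutlassMoEType

# Hypothetical sizes for illustration; real values come from the model config.
params = CutlassMoEParams(
    CutlassMoEType.BlockscaledFP4,
    device=torch.device("cpu"),  # a CUDA device in real use
    num_experts=8,
    intermediate_size_per_partition=2048,
    hidden_size=4096,
)

# Per-expert strides: GEMM 1 reads rows of length k, GEMM 2 rows of length n.
print(params.ab_strides_13)  # shape (e,), every entry == hidden_size
print(params.ab_strides_2)   # shape (e,), every entry == intermediate_size_per_partition

# expert_offsets has e + 1 entries; once prepare_moe_input fills it, expert E owns
# the shuffled token rows expert_offsets[E] : expert_offsets[E + 1].
gemm1_args = params.to_gemm1_args()
print(sorted(gemm1_args.keys()))
# ['ab_strides', 'blockscale_offsets', 'c_strides', 'expert_offsets', 'problem_sizes']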