sglang 0.4.6.post4__py3-none-any.whl → 0.4.7__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in a supported public registry. The information is provided for informational purposes only.
- sglang/bench_offline_throughput.py +16 -10
- sglang/bench_one_batch.py +5 -4
- sglang/bench_one_batch_server.py +86 -22
- sglang/bench_serving.py +197 -110
- sglang/compile_deep_gemm.py +4 -4
- sglang/lang/backend/runtime_endpoint.py +24 -1
- sglang/profiler.py +167 -0
- sglang/srt/_custom_ops.py +34 -0
- sglang/srt/configs/internvl.py +8 -12
- sglang/srt/configs/model_config.py +66 -29
- sglang/srt/constrained/base_grammar_backend.py +5 -2
- sglang/srt/constrained/llguidance_backend.py +9 -8
- sglang/srt/constrained/outlines_backend.py +5 -4
- sglang/srt/constrained/xgrammar_backend.py +18 -18
- sglang/srt/conversation.py +47 -9
- sglang/srt/custom_op.py +38 -3
- sglang/srt/debug_utils.py +74 -0
- sglang/srt/disaggregation/common/__init__.py +1 -0
- sglang/srt/disaggregation/common/conn.py +407 -0
- sglang/srt/disaggregation/decode.py +187 -134
- sglang/srt/disaggregation/decode_schedule_batch_mixin.py +142 -0
- sglang/srt/disaggregation/fake/conn.py +4 -13
- sglang/srt/disaggregation/kv_events.py +412 -0
- sglang/srt/disaggregation/launch_lb.py +140 -0
- sglang/srt/disaggregation/mini_lb.py +84 -70
- sglang/srt/disaggregation/mooncake/conn.py +441 -140
- sglang/srt/disaggregation/mooncake/transfer_engine.py +31 -14
- sglang/srt/disaggregation/nixl/conn.py +124 -442
- sglang/srt/disaggregation/prefill.py +128 -44
- sglang/srt/disaggregation/utils.py +154 -6
- sglang/srt/distributed/device_communicators/pymscclpp.py +315 -0
- sglang/srt/distributed/parallel_state.py +52 -5
- sglang/srt/distributed/utils.py +3 -3
- sglang/srt/entrypoints/EngineBase.py +11 -0
- sglang/srt/entrypoints/engine.py +129 -12
- sglang/srt/entrypoints/http_server.py +21 -6
- sglang/srt/entrypoints/http_server_engine.py +5 -2
- sglang/srt/function_call/base_format_detector.py +302 -0
- sglang/srt/function_call/core_types.py +34 -0
- sglang/srt/function_call/deepseekv3_detector.py +205 -0
- sglang/srt/function_call/ebnf_composer.py +248 -0
- sglang/srt/function_call/function_call_parser.py +202 -0
- sglang/srt/function_call/llama32_detector.py +93 -0
- sglang/srt/function_call/mistral_detector.py +131 -0
- sglang/srt/function_call/pythonic_detector.py +229 -0
- sglang/srt/function_call/qwen25_detector.py +121 -0
- sglang/srt/function_call/utils.py +52 -0
- sglang/srt/hf_transformers_utils.py +50 -7
- sglang/srt/layers/attention/aiter_backend.py +878 -0
- sglang/srt/layers/attention/base_attn_backend.py +4 -0
- sglang/srt/layers/attention/cutlass_mla_backend.py +2 -19
- sglang/srt/layers/attention/flashattention_backend.py +166 -35
- sglang/srt/layers/attention/flashinfer_backend.py +45 -1
- sglang/srt/layers/attention/flashinfer_mla_backend.py +45 -5
- sglang/srt/layers/attention/flashmla_backend.py +340 -78
- sglang/srt/layers/attention/intel_amx_backend.py +128 -0
- sglang/srt/layers/attention/tbo_backend.py +232 -0
- sglang/srt/layers/attention/torch_native_backend.py +3 -0
- sglang/srt/layers/attention/triton_backend.py +247 -5
- sglang/srt/layers/attention/triton_ops/extend_attention.py +12 -4
- sglang/srt/layers/attention/utils.py +2 -2
- sglang/srt/layers/attention/vision.py +1 -1
- sglang/srt/layers/communicator.py +517 -0
- sglang/srt/layers/dp_attention.py +6 -15
- sglang/srt/layers/layernorm.py +30 -19
- sglang/srt/layers/moe/cutlass_moe.py +370 -0
- sglang/srt/layers/moe/cutlass_moe_params.py +169 -0
- sglang/srt/layers/moe/ep_moe/kernels.py +60 -17
- sglang/srt/layers/moe/ep_moe/layer.py +195 -87
- sglang/srt/layers/moe/ep_moe/token_dispatcher.py +88 -8
- sglang/srt/layers/moe/fused_moe_native.py +4 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +220 -25
- sglang/srt/layers/moe/fused_moe_triton/layer.py +48 -4
- sglang/srt/layers/moe/topk.py +107 -24
- sglang/srt/layers/multimodal.py +70 -0
- sglang/srt/layers/quantization/__init__.py +10 -4
- sglang/srt/layers/quantization/blockwise_int8.py +3 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +5 -0
- sglang/srt/layers/quantization/deep_gemm.py +60 -59
- sglang/srt/layers/quantization/fp8.py +113 -18
- sglang/srt/layers/quantization/fp8_kernel.py +118 -66
- sglang/srt/layers/quantization/fp8_utils.py +165 -43
- sglang/srt/layers/quantization/gptq.py +298 -6
- sglang/srt/layers/quantization/int8_kernel.py +18 -5
- sglang/srt/layers/quantization/modelopt_quant.py +334 -7
- sglang/srt/layers/quantization/moe_wna16.py +3 -0
- sglang/srt/layers/quantization/qoq.py +244 -0
- sglang/srt/layers/quantization/w8a8_fp8.py +3 -0
- sglang/srt/layers/quantization/w8a8_int8.py +3 -0
- sglang/srt/layers/rotary_embedding.py +6 -12
- sglang/srt/layers/sampler.py +80 -79
- sglang/srt/layers/utils.py +6 -0
- sglang/srt/lora/layers.py +12 -15
- sglang/srt/lora/lora.py +49 -5
- sglang/srt/lora/lora_manager.py +20 -8
- sglang/srt/lora/mem_pool.py +24 -16
- sglang/srt/lora/utils.py +17 -13
- sglang/srt/managers/data_parallel_controller.py +13 -5
- sglang/srt/managers/eplb_algorithms/__init__.py +63 -0
- sglang/srt/managers/eplb_algorithms/deepseek.py +223 -0
- sglang/srt/managers/eplb_algorithms/deepseek_vec.py +276 -0
- sglang/srt/managers/eplb_manager.py +96 -0
- sglang/srt/managers/expert_distribution.py +878 -56
- sglang/srt/managers/expert_location.py +448 -0
- sglang/srt/managers/expert_location_dispatch.py +108 -0
- sglang/srt/managers/io_struct.py +29 -5
- sglang/srt/managers/mm_utils.py +355 -151
- sglang/srt/managers/multimodal_processors/base_processor.py +299 -42
- sglang/srt/managers/multimodal_processors/deepseek_vl_v2.py +6 -1
- sglang/srt/managers/multimodal_processors/gemma3.py +15 -17
- sglang/srt/managers/multimodal_processors/internvl.py +18 -5
- sglang/srt/managers/multimodal_processors/janus_pro.py +7 -1
- sglang/srt/managers/multimodal_processors/kimi_vl.py +14 -32
- sglang/srt/managers/multimodal_processors/llava.py +3 -3
- sglang/srt/managers/multimodal_processors/minicpm.py +27 -32
- sglang/srt/managers/multimodal_processors/mllama4.py +6 -0
- sglang/srt/managers/multimodal_processors/phi4mm.py +87 -0
- sglang/srt/managers/multimodal_processors/pixtral.py +9 -9
- sglang/srt/managers/multimodal_processors/qwen_vl.py +35 -35
- sglang/srt/managers/schedule_batch.py +185 -55
- sglang/srt/managers/schedule_policy.py +4 -5
- sglang/srt/managers/scheduler.py +389 -154
- sglang/srt/managers/session_controller.py +1 -1
- sglang/srt/managers/tokenizer_manager.py +231 -39
- sglang/srt/managers/utils.py +0 -4
- sglang/srt/mem_cache/base_prefix_cache.py +3 -0
- sglang/srt/mem_cache/chunk_cache.py +3 -1
- sglang/srt/mem_cache/hiradix_cache.py +4 -4
- sglang/srt/mem_cache/memory_pool.py +74 -52
- sglang/srt/mem_cache/multimodal_cache.py +45 -0
- sglang/srt/mem_cache/radix_cache.py +58 -5
- sglang/srt/metrics/collector.py +11 -2
- sglang/srt/mm_utils.py +10 -0
- sglang/srt/model_executor/cuda_graph_runner.py +87 -65
- sglang/srt/model_executor/expert_location_updater.py +557 -0
- sglang/srt/model_executor/forward_batch_info.py +39 -14
- sglang/srt/model_executor/model_runner.py +231 -101
- sglang/srt/model_loader/loader.py +10 -6
- sglang/srt/model_loader/utils.py +67 -1
- sglang/srt/models/clip.py +5 -1
- sglang/srt/models/deepseek_nextn.py +1 -1
- sglang/srt/models/deepseek_v2.py +732 -403
- sglang/srt/models/exaone.py +8 -3
- sglang/srt/models/gemma3_causal.py +7 -0
- sglang/srt/models/gemma3_mm.py +75 -33
- sglang/srt/models/idefics2.py +342 -0
- sglang/srt/models/kimi_vl.py +4 -4
- sglang/srt/models/llama.py +1 -1
- sglang/srt/models/llama4.py +10 -2
- sglang/srt/models/llava.py +26 -18
- sglang/srt/models/mimo_mtp.py +220 -0
- sglang/srt/models/minicpmo.py +7 -17
- sglang/srt/models/minicpmv.py +3 -295
- sglang/srt/models/mistral.py +71 -1
- sglang/srt/models/mllama.py +3 -3
- sglang/srt/models/phi4mm.py +512 -0
- sglang/srt/models/qwen2.py +133 -35
- sglang/srt/models/qwen2_5_vl.py +5 -3
- sglang/srt/models/qwen2_eagle.py +4 -1
- sglang/srt/models/qwen2_moe.py +206 -69
- sglang/srt/models/qwen2_vl.py +3 -3
- sglang/srt/models/qwen3.py +92 -19
- sglang/srt/models/qwen3_moe.py +457 -55
- sglang/srt/models/registry.py +9 -1
- sglang/srt/models/siglip.py +294 -0
- sglang/srt/models/transformers.py +291 -0
- sglang/srt/openai_api/adapter.py +114 -40
- sglang/srt/openai_api/protocol.py +37 -2
- sglang/srt/openai_api/utils.py +172 -0
- sglang/srt/operations.py +189 -0
- sglang/srt/operations_strategy.py +207 -0
- sglang/srt/sampling/sampling_batch_info.py +13 -1
- sglang/srt/sampling/sampling_params.py +2 -1
- sglang/srt/server_args.py +235 -38
- sglang/srt/speculative/build_eagle_tree.py +8 -8
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +8 -11
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +253 -0
- sglang/srt/speculative/eagle_utils.py +181 -90
- sglang/srt/speculative/eagle_worker.py +146 -21
- sglang/srt/two_batch_overlap.py +635 -0
- sglang/srt/utils.py +197 -19
- sglang/test/runners.py +16 -7
- sglang/test/send_one.py +4 -0
- sglang/test/test_cutlass_moe.py +278 -0
- sglang/test/test_fp4_moe.py +248 -0
- sglang/test/test_utils.py +81 -42
- sglang/utils.py +2 -2
- sglang/version.py +1 -1
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/METADATA +31 -19
- sglang-0.4.7.dist-info/RECORD +699 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/WHEEL +1 -1
- sglang/srt/function_call_parser.py +0 -858
- sglang/srt/platforms/interface.py +0 -371
- sglang-0.4.6.post4.dist-info/RECORD +0 -646
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H200.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_L40S.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=96,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/models/{xiaomi_mimo.py → mimo.py} +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/top_level.txt +0 -0
sglang/srt/layers/attention/flashmla_backend.py

@@ -8,7 +8,7 @@ Enable speculative sampling in FlashMLA
 """

 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING, Callable, Optional, Tuple, Union

 import torch
 import triton
@@ -30,8 +30,8 @@ if TYPE_CHECKING:

 # FlashMLA only supports pagesize=64
 PAGE_SIZE = 64
-
-
+
+# FlashMLA FP8 issue: https://github.com/deepseek-ai/FlashMLA/issues/56


 @dataclass
@@ -52,7 +52,7 @@ class FlashMLADecodeMetadata:


 class FlashMLABackend(FlashInferMLAAttnBackend):
-    """
+    """Flashmla attention kernels."""

     def __init__(
         self,
@@ -82,42 +82,72 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
         self.q_data_type = model_runner.dtype
         self.kv_cache_dim = self.kv_lora_rank + self.qk_rope_head_dim

+        self.num_draft_tokens = model_runner.server_args.speculative_num_draft_tokens
+
     def init_forward_metadata(self, forward_batch: ForwardBatch):

         bs = forward_batch.batch_size
-        spec_info = forward_batch.spec_info
         if forward_batch.forward_mode.is_decode_or_idle():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            max_seqlen_pad = triton.cdiv(
+                forward_batch.seq_lens_cpu.max().item(), PAGE_SIZE
+            )
+            block_kv_indices = torch.full(
+                (bs, max_seqlen_pad),
+                -1,
+                dtype=torch.int32,
+                device=forward_batch.seq_lens.device,
+            )
+            create_flashmla_kv_indices_triton[(bs,)](
+                self.req_to_token,
+                forward_batch.req_pool_indices,
+                forward_batch.seq_lens,
+                None,
+                block_kv_indices,
+                self.req_to_token.stride(0),
+                max_seqlen_pad,
+            )
+            mla_metadata, num_splits = get_mla_metadata(
+                forward_batch.seq_lens.to(torch.int32),
+                self.num_q_heads,
+                1,
+            )
+            self.forward_metadata = FlashMLADecodeMetadata(
+                mla_metadata,
+                num_splits,
+                block_kv_indices,
+            )
+        elif forward_batch.forward_mode.is_target_verify():
+            seq_lens_cpu = forward_batch.seq_lens_cpu + self.num_draft_tokens
+            seq_lens = forward_batch.seq_lens + self.num_draft_tokens
+
+            max_seqlen_pad = triton.cdiv(seq_lens_cpu.max().item(), PAGE_SIZE)
+            block_kv_indices = torch.full(
+                (bs, max_seqlen_pad),
+                -1,
+                dtype=torch.int32,
+                device=seq_lens.device,
+            )
+            create_flashmla_kv_indices_triton[(bs,)](
+                self.req_to_token,
+                forward_batch.req_pool_indices,
+                seq_lens,
+                None,
+                block_kv_indices,
+                self.req_to_token.stride(0),
+                max_seqlen_pad,
+            )
+            mla_metadata, num_splits = get_mla_metadata(
+                seq_lens.to(torch.int32),
+                self.num_draft_tokens * self.num_q_heads,
+                1,
+            )
+
+            # Use FlashMLADecodeMetadata which has the attributes forward_extend expects
+            self.forward_metadata = FlashMLADecodeMetadata(
+                mla_metadata,
+                num_splits,
+                block_kv_indices,
+            )
         else:
             super().init_forward_metadata(forward_batch)

@@ -136,11 +166,22 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
         else:
             cuda_graph_kv_indices = block_kv_indices

-
-
-
-
-
+        if self.num_draft_tokens:
+            self.cuda_graph_mla_metadata, self.cuda_graph_num_splits = get_mla_metadata(
+                torch.ones(
+                    max_bs, dtype=torch.int32, device=cuda_graph_kv_indices.device
+                ),
+                self.num_draft_tokens * self.num_q_heads,
+                1,
+            )
+        else:
+            self.cuda_graph_mla_metadata, self.cuda_graph_num_splits = get_mla_metadata(
+                torch.ones(
+                    max_bs, dtype=torch.int32, device=cuda_graph_kv_indices.device
+                ),
+                self.num_q_heads,
+                1,
+            )
         self.cuda_graph_kv_indices = cuda_graph_kv_indices

     def init_forward_metadata_capture_cuda_graph(
@@ -154,31 +195,54 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
         spec_info: Optional[SpecInfo],
     ):
         if forward_mode.is_decode_or_idle():
-
-            max_seqlen_pad = triton.cdiv(seq_lens.max().item(), PAGE_SIZE)
-
-            create_flashmla_kv_indices_triton[(bs,)](
-                self.req_to_token,
-                req_pool_indices,
-                seq_lens,
-                None,
-                self.cuda_graph_kv_indices,
-                self.req_to_token.stride(0),
-                self.cuda_graph_kv_indices.stride(0),
-            )
-            mla_metadata, num_splits = get_mla_metadata(
-                seq_lens.to(torch.int32),
-                Q_LEN * self.num_q_heads,
-                1,
-            )
-            self.cuda_graph_mla_metadata.copy_(mla_metadata)
-            self.cuda_graph_num_splits[: bs + 1].copy_(num_splits)
-            self.forward_metadata = FlashMLADecodeMetadata(
-                self.cuda_graph_mla_metadata,
-                self.cuda_graph_num_splits[: bs + 1],
-                self.cuda_graph_kv_indices[:bs, :max_seqlen_pad],
-            )
+            max_seqlen_pad = triton.cdiv(seq_lens.max().item(), PAGE_SIZE)

+            create_flashmla_kv_indices_triton[(bs,)](
+                self.req_to_token,
+                req_pool_indices,
+                seq_lens,
+                None,
+                self.cuda_graph_kv_indices,
+                self.req_to_token.stride(0),
+                self.cuda_graph_kv_indices.stride(0),
+            )
+            mla_metadata, num_splits = get_mla_metadata(
+                seq_lens.to(torch.int32),
+                self.num_q_heads,
+                1,
+            )
+            self.cuda_graph_mla_metadata.copy_(mla_metadata)
+            self.cuda_graph_num_splits[: bs + 1].copy_(num_splits)
+            self.forward_metadata = FlashMLADecodeMetadata(
+                self.cuda_graph_mla_metadata,
+                self.cuda_graph_num_splits[: bs + 1],
+                self.cuda_graph_kv_indices[:bs, :max_seqlen_pad],
+            )
+        elif forward_mode.is_target_verify():
+            seq_lens = seq_lens + self.num_draft_tokens
+            max_seqlen_pad = triton.cdiv(seq_lens.max().item(), PAGE_SIZE)
+
+            create_flashmla_kv_indices_triton[(bs,)](
+                self.req_to_token,
+                req_pool_indices,
+                seq_lens,
+                None,
+                self.cuda_graph_kv_indices,
+                self.req_to_token.stride(0),
+                self.cuda_graph_kv_indices.stride(0),
+            )
+            mla_metadata, num_splits = get_mla_metadata(
+                seq_lens.to(torch.int32),
+                self.num_draft_tokens * self.num_q_heads,
+                1,
+            )
+            self.cuda_graph_mla_metadata.copy_(mla_metadata)
+            self.cuda_graph_num_splits[: bs + 1].copy_(num_splits)
+            self.forward_metadata = FlashMLADecodeMetadata(
+                self.cuda_graph_mla_metadata,
+                self.cuda_graph_num_splits[: bs + 1],
+                self.cuda_graph_kv_indices[:bs, :max_seqlen_pad],
+            )
         else:
             super().init_forward_metadata_capture_cuda_graph(
                 bs,
@@ -218,7 +282,32 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
             )
             mla_metadata, num_splits = get_mla_metadata(
                 seq_lens.to(torch.int32),
-
+                self.num_q_heads,
+                1,
+            )
+            self.cuda_graph_mla_metadata.copy_(mla_metadata)
+            self.cuda_graph_num_splits[: bs + 1].copy_(num_splits)
+            self.forward_metadata.mla_metadata = self.cuda_graph_mla_metadata
+            self.forward_metadata.num_splits = self.cuda_graph_num_splits[: bs + 1]
+            self.forward_metadata.block_kv_indices = self.cuda_graph_kv_indices[
+                :bs, :max_seqlen_pad
+            ]
+        elif forward_mode.is_target_verify():
+            seq_lens = seq_lens[:bs] + self.num_draft_tokens
+            seq_lens_cpu = seq_lens_cpu[:bs] + self.num_draft_tokens
+            max_seqlen_pad = triton.cdiv(seq_lens_cpu.max().item(), PAGE_SIZE)
+            create_flashmla_kv_indices_triton[(bs,)](
+                self.req_to_token,
+                req_pool_indices[:bs],
+                seq_lens,
+                None,
+                self.cuda_graph_kv_indices,
+                self.req_to_token.stride(0),
+                self.cuda_graph_kv_indices.stride(0),
+            )
+            mla_metadata, num_splits = get_mla_metadata(
+                seq_lens.to(torch.int32),
+                self.num_draft_tokens * self.num_q_heads,
                 1,
             )
             self.cuda_graph_mla_metadata.copy_(mla_metadata)
@@ -228,7 +317,6 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
             self.forward_metadata.block_kv_indices = self.cuda_graph_kv_indices[
                 :bs, :max_seqlen_pad
             ]
-
         else:
             super().init_forward_metadata_replay_cuda_graph(
                 bs,
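The capture and replay hunks above follow the usual CUDA Graph discipline: the buffers read by the captured kernels (cuda_graph_mla_metadata, cuda_graph_num_splits, cuda_graph_kv_indices) are allocated once at maximum size, and replay only copies fresh values into them in place so the pointers recorded in the graph stay valid. A minimal sketch of that pattern follows; the class and method names (GraphSafeMetadata, replay_update) are hypothetical illustrations, not sglang API.

import torch


class GraphSafeMetadata:
    """Preallocate at max batch size; update in place at replay time."""

    def __init__(self, max_bs: int, device: str = "cpu"):
        # Allocated once, before graph capture; the captured kernels keep
        # reading this exact tensor, so its storage must never be replaced.
        self.num_splits = torch.zeros(max_bs + 1, dtype=torch.int32, device=device)

    def replay_update(self, fresh_num_splits: torch.Tensor, bs: int) -> None:
        # The in-place copy leaves the storage (and captured pointers)
        # unchanged, mirroring
        # `self.cuda_graph_num_splits[: bs + 1].copy_(num_splits)` above.
        self.num_splits[: bs + 1].copy_(fresh_num_splits)


meta = GraphSafeMetadata(max_bs=8)
meta.replay_update(torch.arange(4, dtype=torch.int32), bs=3)
assert meta.num_splits[:4].tolist() == [0, 1, 2, 3]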
@@ -268,17 +356,191 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
         k_cache = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id)

         reshape_q = q.view(bs, -1, layer.tp_q_head_num, layer.head_dim)
+        if self.data_type == torch.float8_e4m3fn:
+            reshape_q_fp8 = reshape_q.to(torch.float8_e4m3fn)
+            o, _ = flash_mla_with_kvcache(
+                q=reshape_q_fp8,
+                k_cache=k_cache.view(-1, PAGE_SIZE, 1, self.kv_cache_dim),
+                block_table=self.forward_metadata.block_kv_indices[:bs],
+                cache_seqlens=forward_batch.seq_lens.to(torch.int32),
+                head_dim_v=self.kv_lora_rank,  # TODO Retrieve from config.
+                tile_scheduler_metadata=self.forward_metadata.flashmla_metadata,
+                num_splits=self.forward_metadata.num_splits,
+                softmax_scale=layer.scaling,
+                causal=True,
+                descale_q=torch.ones((1), dtype=torch.float32, device=reshape_q.device),
+                descale_k=torch.ones((1), dtype=torch.float32, device=reshape_q.device),
+            )
+
+            return o.view(-1, layer.tp_q_head_num * layer.v_head_dim)
+        else:
+            # todo: need check all causal True or False?
+            o, _ = flash_mla_with_kvcache(
+                q=reshape_q,
+                k_cache=k_cache.view(-1, PAGE_SIZE, 1, self.kv_cache_dim),
+                block_table=self.forward_metadata.block_kv_indices[:bs],
+                cache_seqlens=forward_batch.seq_lens.to(torch.int32),
+                head_dim_v=self.kv_lora_rank,  # TODO Retrieve from config.
+                tile_scheduler_metadata=self.forward_metadata.flashmla_metadata,
+                num_splits=self.forward_metadata.num_splits,
+                softmax_scale=layer.scaling,
+                causal=True,
+            )
+
+            return o.view(-1, layer.tp_q_head_num * layer.v_head_dim)
+
+    def forward_extend(
+        self,
+        q: torch.Tensor,
+        k: torch.Tensor,
+        v: torch.Tensor,
+        layer: RadixAttention,
+        forward_batch: ForwardBatch,
+        save_kv_cache: bool = True,
+    ):
+        if (
+            forward_batch.forward_mode == ForwardMode.EXTEND
+            or forward_batch.forward_mode == ForwardMode.DRAFT_EXTEND
+        ):
+            return super().forward_extend(q, k, v, layer, forward_batch, save_kv_cache)
+        else:
+            cache_loc = forward_batch.out_cache_loc
+
+            if k is not None:
+                assert v is not None
+                if save_kv_cache:
+                    forward_batch.token_to_kv_pool.set_kv_buffer(layer, cache_loc, k, v)
+
+            bs = forward_batch.batch_size
+            k_cache = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id)
+
+            reshape_q = q.view(bs, -1, layer.tp_q_head_num, layer.head_dim)
+            if self.data_type == torch.float8_e4m3fn:
+                reshape_q_fp8 = reshape_q.to(torch.float8_e4m3fn)
+                o, _ = flash_mla_with_kvcache(
+                    q=reshape_q_fp8,
+                    k_cache=k_cache.view(-1, PAGE_SIZE, 1, self.kv_cache_dim),
+                    block_table=self.forward_metadata.block_kv_indices[:bs],
+                    cache_seqlens=forward_batch.seq_lens.to(torch.int32)
+                    + self.num_draft_tokens,
+                    head_dim_v=self.kv_lora_rank,
+                    tile_scheduler_metadata=self.forward_metadata.flashmla_metadata,
+                    num_splits=self.forward_metadata.num_splits,
+                    softmax_scale=layer.scaling,
+                    causal=True,
+                    descale_q=torch.ones(
+                        (1), dtype=torch.float32, device=reshape_q.device
+                    ),
+                    descale_k=torch.ones(
+                        (1), dtype=torch.float32, device=reshape_q.device
+                    ),
+                )
+            else:
+                o, _ = flash_mla_with_kvcache(
+                    q=reshape_q,
+                    k_cache=k_cache.view(-1, PAGE_SIZE, 1, self.kv_cache_dim),
+                    block_table=self.forward_metadata.block_kv_indices[:bs],
+                    cache_seqlens=forward_batch.seq_lens.to(torch.int32)
+                    + self.num_draft_tokens,
+                    head_dim_v=self.kv_lora_rank,
+                    tile_scheduler_metadata=self.forward_metadata.flashmla_metadata,
+                    num_splits=self.forward_metadata.num_splits,
+                    softmax_scale=layer.scaling,
+                    causal=True,
+                )
+            return o.view(-1, layer.tp_q_head_num * layer.v_head_dim)
+

-
-
-
-
-
-
-
-
-
-
+# TODO: multi step kv indices optimization
+class FlashMLAMultiStepDraftBackend:
+    """
+    Wrap multiple flashmla attention backends as one for multiple consecutive
+    draft decoding steps.
+    """
+
+    def __init__(
+        self,
+        model_runner: ModelRunner,
+        topk: int,
+        speculative_num_steps: int,
+    ):
+        from sglang.srt.speculative.eagle_utils import generate_draft_decode_kv_indices
+
+        if topk > 1:
+            raise ValueError(
+                f"Currently FlashMLA only supports topk=1 for speculative decoding"
+            )
+        self.topk = topk
+        self.speculative_num_steps = speculative_num_steps
+        max_bs = model_runner.req_to_token_pool.size * self.topk
+        self.kv_indptr = torch.zeros(
+            (
+                self.speculative_num_steps,
+                max_bs + 1,
+            ),
+            dtype=torch.int32,
+            device=model_runner.device,
         )

-
+        self.attn_backends = []
+        for i in range(self.speculative_num_steps):
+            self.attn_backends.append(
+                FlashMLABackend(
+                    model_runner,
+                    skip_prefill=True,
+                    kv_indptr_buf=self.kv_indptr[i],
+                    kv_last_page_len_buf=None,
+                )
+            )
+
+    def common_template(
+        self,
+        forward_batch: ForwardBatch,
+        call_fn: Callable,
+    ):
+        assert forward_batch.spec_info is not None
+
+        for i in range(self.speculative_num_steps - 1):
+            call_fn(i, forward_batch)
+
+    def init_forward_metadata(self, forward_batch: ForwardBatch):
+        def call_fn(i, forward_batch):
+            assert forward_batch.spec_info is not None
+            self.attn_backends[i].init_forward_metadata(forward_batch)
+
+        self.common_template(forward_batch, call_fn)
+
+    def init_cuda_graph_state(self, max_bs: int):
+        for i in range(self.speculative_num_steps):
+            self.attn_backends[i].init_cuda_graph_state(max_bs, block_kv_indices=None)
+
+    def init_forward_metadata_capture_cuda_graph(self, forward_batch: ForwardBatch):
+        def call_fn(i, forward_batch):
+            self.attn_backends[i].init_forward_metadata_capture_cuda_graph(
+                forward_batch.batch_size,
+                forward_batch.batch_size * self.topk,
+                forward_batch.req_pool_indices,
+                forward_batch.seq_lens,
+                encoder_lens=None,
+                forward_mode=ForwardMode.DECODE,
+                spec_info=forward_batch.spec_info,
+            )
+
+        self.common_template(forward_batch, call_fn)
+
+    def init_forward_metadata_replay_cuda_graph(
+        self, forward_batch: ForwardBatch, bs: int
+    ):
+        def call_fn(i, forward_batch):
+            self.attn_backends[i].init_forward_metadata_replay_cuda_graph(
+                bs,
+                forward_batch.req_pool_indices,
+                forward_batch.seq_lens,
+                seq_lens_sum=-1,
+                encoder_lens=None,
+                forward_mode=ForwardMode.DECODE,
+                spec_info=forward_batch.spec_info,
+                seq_lens_cpu=forward_batch.seq_lens_cpu,
+            )
+
+        self.common_template(forward_batch, call_fn)
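One detail runs through all the new target-verify and multi-step draft paths above: during verification every request's KV length grows by speculative_num_draft_tokens, and the paged block table is sized up to whole 64-token FlashMLA pages, with unused slots filled with -1. A small self-contained sketch of that sizing arithmetic, assuming the same PAGE_SIZE = 64 as the diff (the helper name padded_block_table is hypothetical):

import torch

PAGE_SIZE = 64  # FlashMLA only supports pagesize=64


def padded_block_table(seq_lens: torch.Tensor, num_draft_tokens: int) -> torch.Tensor:
    # Every sequence is verified with num_draft_tokens extra speculative
    # tokens, so metadata is built for seq_lens + num_draft_tokens, rounded
    # up to whole pages (triton.cdiv in the diff).
    verify_lens = seq_lens + num_draft_tokens
    max_seqlen_pad = (int(verify_lens.max().item()) + PAGE_SIZE - 1) // PAGE_SIZE
    # Unused slots stay -1, matching torch.full((bs, max_seqlen_pad), -1, ...).
    return torch.full((seq_lens.numel(), max_seqlen_pad), -1, dtype=torch.int32)


# Two requests at KV lengths 100 and 130 with 4 draft tokens each:
# verify lengths 104 and 134 -> ceil(134 / 64) = 3 pages per row.
table = padded_block_table(torch.tensor([100, 130]), num_draft_tokens=4)
assert table.shape == (2, 3)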
|
@@ -0,0 +1,128 @@
|
|
1
|
+
from __future__ import annotations
|
2
|
+
|
3
|
+
from typing import TYPE_CHECKING
|
4
|
+
|
5
|
+
import torch
|
6
|
+
|
7
|
+
from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
|
8
|
+
from sglang.srt.model_executor.forward_batch_info import ForwardBatch
|
9
|
+
|
10
|
+
if TYPE_CHECKING:
|
11
|
+
from sglang.srt.layers.radix_attention import RadixAttention
|
12
|
+
from sglang.srt.model_executor.model_runner import ModelRunner
|
13
|
+
|
14
|
+
|
15
|
+
class IntelAMXAttnBackend(AttentionBackend):
|
16
|
+
def __init__(self, model_runner: ModelRunner):
|
17
|
+
import sgl_kernel
|
18
|
+
|
19
|
+
super().__init__()
|
20
|
+
self.forward_metadata = None
|
21
|
+
self.device = model_runner.device
|
22
|
+
|
23
|
+
self.num_head = (
|
24
|
+
model_runner.model_config.num_attention_heads // model_runner.tp_size
|
25
|
+
)
|
26
|
+
|
27
|
+
self.v_head_dim = model_runner.token_to_kv_pool.get_value_buffer(0).shape[-1]
|
28
|
+
|
29
|
+
self.decode_attention_fwd = torch.ops.sgl_kernel.decode_attention_cpu
|
30
|
+
self.extend_attention_fwd = torch.ops.sgl_kernel.extend_attention_cpu
|
31
|
+
|
32
|
+
def init_forward_metadata(self, forward_batch: ForwardBatch):
|
33
|
+
"""Init the metadata for a forward pass."""
|
34
|
+
|
35
|
+
bs = forward_batch.batch_size
|
36
|
+
attn_logits = torch.zeros(
|
37
|
+
(
|
38
|
+
bs,
|
39
|
+
self.num_head,
|
40
|
+
8, # self.num_kv_splits,
|
41
|
+
self.v_head_dim + 1,
|
42
|
+
),
|
43
|
+
dtype=torch.float32,
|
44
|
+
device=self.device,
|
45
|
+
)
|
46
|
+
if forward_batch.forward_mode.is_decode_or_idle():
|
47
|
+
max_extend_len = None
|
48
|
+
else:
|
49
|
+
max_extend_len = torch.max(forward_batch.extend_seq_lens).item()
|
50
|
+
self.forward_metadata = (attn_logits, max_extend_len)
|
51
|
+
|
52
|
+
def forward_extend(
|
53
|
+
self,
|
54
|
+
q,
|
55
|
+
k,
|
56
|
+
v,
|
57
|
+
layer: RadixAttention,
|
58
|
+
forward_batch: ForwardBatch,
|
59
|
+
save_kv_cache=True,
|
60
|
+
):
|
61
|
+
if layer.qk_head_dim != layer.v_head_dim:
|
62
|
+
o = q.new_empty((q.shape[0], layer.tp_q_head_num * layer.v_head_dim))
|
63
|
+
else:
|
64
|
+
o = torch.empty_like(q)
|
65
|
+
|
66
|
+
if save_kv_cache:
|
67
|
+
forward_batch.token_to_kv_pool.set_kv_buffer(
|
68
|
+
layer, forward_batch.out_cache_loc, k, v
|
69
|
+
)
|
70
|
+
|
71
|
+
_, max_extend_len = self.forward_metadata
|
72
|
+
|
73
|
+
self.extend_attention_fwd(
|
74
|
+
q.view(-1, layer.tp_q_head_num, layer.qk_head_dim),
|
75
|
+
k,
|
76
|
+
v,
|
77
|
+
o.view(-1, layer.tp_q_head_num, layer.v_head_dim),
|
78
|
+
forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id),
|
79
|
+
forward_batch.token_to_kv_pool.get_value_buffer(layer.layer_id),
|
80
|
+
forward_batch.req_to_token_pool.req_to_token,
|
81
|
+
forward_batch.req_pool_indices,
|
82
|
+
forward_batch.seq_lens,
|
83
|
+
forward_batch.extend_seq_lens,
|
84
|
+
forward_batch.extend_start_loc,
|
85
|
+
max_extend_len,
|
86
|
+
layer.scaling,
|
87
|
+
layer.logit_cap,
|
88
|
+
)
|
89
|
+
return o
|
90
|
+
|
91
|
+
def forward_decode(
|
92
|
+
self,
|
93
|
+
q: torch.Tensor,
|
94
|
+
k: torch.Tensor,
|
95
|
+
v: torch.Tensor,
|
96
|
+
layer: RadixAttention,
|
97
|
+
forward_batch: ForwardBatch,
|
98
|
+
save_kv_cache=True,
|
99
|
+
):
|
100
|
+
attn_logits, _ = self.forward_metadata
|
101
|
+
|
102
|
+
q = q.reshape(-1, layer.tp_q_head_num * layer.qk_head_dim)
|
103
|
+
|
104
|
+
if layer.qk_head_dim != layer.v_head_dim:
|
105
|
+
o = q.new_empty((q.shape[0], layer.tp_q_head_num * layer.v_head_dim))
|
106
|
+
else:
|
107
|
+
o = torch.empty_like(q)
|
108
|
+
|
109
|
+
self.decode_attention_fwd(
|
110
|
+
q.view(-1, layer.tp_q_head_num, layer.qk_head_dim),
|
111
|
+
forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id),
|
112
|
+
forward_batch.token_to_kv_pool.get_value_buffer(layer.layer_id),
|
113
|
+
o.view(-1, layer.tp_q_head_num, layer.v_head_dim),
|
114
|
+
k,
|
115
|
+
v,
|
116
|
+
forward_batch.out_cache_loc,
|
117
|
+
attn_logits,
|
118
|
+
forward_batch.req_to_token_pool.req_to_token,
|
119
|
+
forward_batch.req_pool_indices,
|
120
|
+
forward_batch.seq_lens,
|
121
|
+
layer.scaling,
|
122
|
+
layer.logit_cap,
|
123
|
+
)
|
124
|
+
|
125
|
+
return o
|
126
|
+
|
127
|
+
def support_triton(self):
|
128
|
+
return False
|