sglang 0.4.6.post4__py3-none-any.whl → 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_offline_throughput.py +16 -10
- sglang/bench_one_batch.py +5 -4
- sglang/bench_one_batch_server.py +86 -22
- sglang/bench_serving.py +197 -110
- sglang/compile_deep_gemm.py +4 -4
- sglang/lang/backend/runtime_endpoint.py +24 -1
- sglang/profiler.py +167 -0
- sglang/srt/_custom_ops.py +34 -0
- sglang/srt/configs/internvl.py +8 -12
- sglang/srt/configs/model_config.py +66 -29
- sglang/srt/constrained/base_grammar_backend.py +5 -2
- sglang/srt/constrained/llguidance_backend.py +9 -8
- sglang/srt/constrained/outlines_backend.py +5 -4
- sglang/srt/constrained/xgrammar_backend.py +18 -18
- sglang/srt/conversation.py +47 -9
- sglang/srt/custom_op.py +38 -3
- sglang/srt/debug_utils.py +74 -0
- sglang/srt/disaggregation/common/__init__.py +1 -0
- sglang/srt/disaggregation/common/conn.py +407 -0
- sglang/srt/disaggregation/decode.py +187 -134
- sglang/srt/disaggregation/decode_schedule_batch_mixin.py +142 -0
- sglang/srt/disaggregation/fake/conn.py +4 -13
- sglang/srt/disaggregation/kv_events.py +412 -0
- sglang/srt/disaggregation/launch_lb.py +140 -0
- sglang/srt/disaggregation/mini_lb.py +84 -70
- sglang/srt/disaggregation/mooncake/conn.py +441 -140
- sglang/srt/disaggregation/mooncake/transfer_engine.py +31 -14
- sglang/srt/disaggregation/nixl/conn.py +124 -442
- sglang/srt/disaggregation/prefill.py +128 -44
- sglang/srt/disaggregation/utils.py +154 -6
- sglang/srt/distributed/device_communicators/pymscclpp.py +315 -0
- sglang/srt/distributed/parallel_state.py +52 -5
- sglang/srt/distributed/utils.py +3 -3
- sglang/srt/entrypoints/EngineBase.py +11 -0
- sglang/srt/entrypoints/engine.py +129 -12
- sglang/srt/entrypoints/http_server.py +21 -6
- sglang/srt/entrypoints/http_server_engine.py +5 -2
- sglang/srt/function_call/base_format_detector.py +302 -0
- sglang/srt/function_call/core_types.py +34 -0
- sglang/srt/function_call/deepseekv3_detector.py +205 -0
- sglang/srt/function_call/ebnf_composer.py +248 -0
- sglang/srt/function_call/function_call_parser.py +202 -0
- sglang/srt/function_call/llama32_detector.py +93 -0
- sglang/srt/function_call/mistral_detector.py +131 -0
- sglang/srt/function_call/pythonic_detector.py +229 -0
- sglang/srt/function_call/qwen25_detector.py +121 -0
- sglang/srt/function_call/utils.py +52 -0
- sglang/srt/hf_transformers_utils.py +50 -7
- sglang/srt/layers/attention/aiter_backend.py +878 -0
- sglang/srt/layers/attention/base_attn_backend.py +4 -0
- sglang/srt/layers/attention/cutlass_mla_backend.py +2 -19
- sglang/srt/layers/attention/flashattention_backend.py +166 -35
- sglang/srt/layers/attention/flashinfer_backend.py +45 -1
- sglang/srt/layers/attention/flashinfer_mla_backend.py +45 -5
- sglang/srt/layers/attention/flashmla_backend.py +340 -78
- sglang/srt/layers/attention/intel_amx_backend.py +128 -0
- sglang/srt/layers/attention/tbo_backend.py +232 -0
- sglang/srt/layers/attention/torch_native_backend.py +3 -0
- sglang/srt/layers/attention/triton_backend.py +247 -5
- sglang/srt/layers/attention/triton_ops/extend_attention.py +12 -4
- sglang/srt/layers/attention/utils.py +2 -2
- sglang/srt/layers/attention/vision.py +1 -1
- sglang/srt/layers/communicator.py +517 -0
- sglang/srt/layers/dp_attention.py +6 -15
- sglang/srt/layers/layernorm.py +30 -19
- sglang/srt/layers/moe/cutlass_moe.py +370 -0
- sglang/srt/layers/moe/cutlass_moe_params.py +169 -0
- sglang/srt/layers/moe/ep_moe/kernels.py +60 -17
- sglang/srt/layers/moe/ep_moe/layer.py +195 -87
- sglang/srt/layers/moe/ep_moe/token_dispatcher.py +88 -8
- sglang/srt/layers/moe/fused_moe_native.py +4 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +220 -25
- sglang/srt/layers/moe/fused_moe_triton/layer.py +48 -4
- sglang/srt/layers/moe/topk.py +107 -24
- sglang/srt/layers/multimodal.py +70 -0
- sglang/srt/layers/quantization/__init__.py +10 -4
- sglang/srt/layers/quantization/blockwise_int8.py +3 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +5 -0
- sglang/srt/layers/quantization/deep_gemm.py +60 -59
- sglang/srt/layers/quantization/fp8.py +113 -18
- sglang/srt/layers/quantization/fp8_kernel.py +118 -66
- sglang/srt/layers/quantization/fp8_utils.py +165 -43
- sglang/srt/layers/quantization/gptq.py +298 -6
- sglang/srt/layers/quantization/int8_kernel.py +18 -5
- sglang/srt/layers/quantization/modelopt_quant.py +334 -7
- sglang/srt/layers/quantization/moe_wna16.py +3 -0
- sglang/srt/layers/quantization/qoq.py +244 -0
- sglang/srt/layers/quantization/w8a8_fp8.py +3 -0
- sglang/srt/layers/quantization/w8a8_int8.py +3 -0
- sglang/srt/layers/rotary_embedding.py +6 -12
- sglang/srt/layers/sampler.py +80 -79
- sglang/srt/layers/utils.py +6 -0
- sglang/srt/lora/layers.py +12 -15
- sglang/srt/lora/lora.py +49 -5
- sglang/srt/lora/lora_manager.py +20 -8
- sglang/srt/lora/mem_pool.py +24 -16
- sglang/srt/lora/utils.py +17 -13
- sglang/srt/managers/data_parallel_controller.py +13 -5
- sglang/srt/managers/eplb_algorithms/__init__.py +63 -0
- sglang/srt/managers/eplb_algorithms/deepseek.py +223 -0
- sglang/srt/managers/eplb_algorithms/deepseek_vec.py +276 -0
- sglang/srt/managers/eplb_manager.py +96 -0
- sglang/srt/managers/expert_distribution.py +878 -56
- sglang/srt/managers/expert_location.py +448 -0
- sglang/srt/managers/expert_location_dispatch.py +108 -0
- sglang/srt/managers/io_struct.py +29 -5
- sglang/srt/managers/mm_utils.py +355 -151
- sglang/srt/managers/multimodal_processors/base_processor.py +299 -42
- sglang/srt/managers/multimodal_processors/deepseek_vl_v2.py +6 -1
- sglang/srt/managers/multimodal_processors/gemma3.py +15 -17
- sglang/srt/managers/multimodal_processors/internvl.py +18 -5
- sglang/srt/managers/multimodal_processors/janus_pro.py +7 -1
- sglang/srt/managers/multimodal_processors/kimi_vl.py +14 -32
- sglang/srt/managers/multimodal_processors/llava.py +3 -3
- sglang/srt/managers/multimodal_processors/minicpm.py +27 -32
- sglang/srt/managers/multimodal_processors/mllama4.py +6 -0
- sglang/srt/managers/multimodal_processors/phi4mm.py +87 -0
- sglang/srt/managers/multimodal_processors/pixtral.py +9 -9
- sglang/srt/managers/multimodal_processors/qwen_vl.py +35 -35
- sglang/srt/managers/schedule_batch.py +185 -55
- sglang/srt/managers/schedule_policy.py +4 -5
- sglang/srt/managers/scheduler.py +389 -154
- sglang/srt/managers/session_controller.py +1 -1
- sglang/srt/managers/tokenizer_manager.py +231 -39
- sglang/srt/managers/utils.py +0 -4
- sglang/srt/mem_cache/base_prefix_cache.py +3 -0
- sglang/srt/mem_cache/chunk_cache.py +3 -1
- sglang/srt/mem_cache/hiradix_cache.py +4 -4
- sglang/srt/mem_cache/memory_pool.py +74 -52
- sglang/srt/mem_cache/multimodal_cache.py +45 -0
- sglang/srt/mem_cache/radix_cache.py +58 -5
- sglang/srt/metrics/collector.py +11 -2
- sglang/srt/mm_utils.py +10 -0
- sglang/srt/model_executor/cuda_graph_runner.py +87 -65
- sglang/srt/model_executor/expert_location_updater.py +557 -0
- sglang/srt/model_executor/forward_batch_info.py +39 -14
- sglang/srt/model_executor/model_runner.py +231 -101
- sglang/srt/model_loader/loader.py +10 -6
- sglang/srt/model_loader/utils.py +67 -1
- sglang/srt/models/clip.py +5 -1
- sglang/srt/models/deepseek_nextn.py +1 -1
- sglang/srt/models/deepseek_v2.py +732 -403
- sglang/srt/models/exaone.py +8 -3
- sglang/srt/models/gemma3_causal.py +7 -0
- sglang/srt/models/gemma3_mm.py +75 -33
- sglang/srt/models/idefics2.py +342 -0
- sglang/srt/models/kimi_vl.py +4 -4
- sglang/srt/models/llama.py +1 -1
- sglang/srt/models/llama4.py +10 -2
- sglang/srt/models/llava.py +26 -18
- sglang/srt/models/mimo_mtp.py +220 -0
- sglang/srt/models/minicpmo.py +7 -17
- sglang/srt/models/minicpmv.py +3 -295
- sglang/srt/models/mistral.py +71 -1
- sglang/srt/models/mllama.py +3 -3
- sglang/srt/models/phi4mm.py +512 -0
- sglang/srt/models/qwen2.py +133 -35
- sglang/srt/models/qwen2_5_vl.py +5 -3
- sglang/srt/models/qwen2_eagle.py +4 -1
- sglang/srt/models/qwen2_moe.py +206 -69
- sglang/srt/models/qwen2_vl.py +3 -3
- sglang/srt/models/qwen3.py +92 -19
- sglang/srt/models/qwen3_moe.py +457 -55
- sglang/srt/models/registry.py +9 -1
- sglang/srt/models/siglip.py +294 -0
- sglang/srt/models/transformers.py +291 -0
- sglang/srt/openai_api/adapter.py +114 -40
- sglang/srt/openai_api/protocol.py +37 -2
- sglang/srt/openai_api/utils.py +172 -0
- sglang/srt/operations.py +189 -0
- sglang/srt/operations_strategy.py +207 -0
- sglang/srt/sampling/sampling_batch_info.py +13 -1
- sglang/srt/sampling/sampling_params.py +2 -1
- sglang/srt/server_args.py +235 -38
- sglang/srt/speculative/build_eagle_tree.py +8 -8
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +8 -11
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +253 -0
- sglang/srt/speculative/eagle_utils.py +181 -90
- sglang/srt/speculative/eagle_worker.py +146 -21
- sglang/srt/two_batch_overlap.py +635 -0
- sglang/srt/utils.py +197 -19
- sglang/test/runners.py +16 -7
- sglang/test/send_one.py +4 -0
- sglang/test/test_cutlass_moe.py +278 -0
- sglang/test/test_fp4_moe.py +248 -0
- sglang/test/test_utils.py +81 -42
- sglang/utils.py +2 -2
- sglang/version.py +1 -1
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/METADATA +31 -19
- sglang-0.4.7.dist-info/RECORD +699 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/WHEEL +1 -1
- sglang/srt/function_call_parser.py +0 -858
- sglang/srt/platforms/interface.py +0 -371
- sglang-0.4.6.post4.dist-info/RECORD +0 -646
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H200.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_L40S.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=96,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/models/{xiaomi_mimo.py → mimo.py} +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/top_level.txt +0 -0
sglang/srt/layers/attention/cutlass_mla_backend.py

@@ -157,7 +157,7 @@ class CutlassMLABackend(FlashInferMLAAttnBackend):
     ):
         if forward_mode.is_decode_or_idle():
             if spec_info is None:
-                max_seqlen_pad =
+                max_seqlen_pad = self.cuda_graph_kv_indices.shape[1]

                 create_flashmla_kv_indices_triton[(bs,)](
                     self.req_to_token,
@@ -169,12 +169,6 @@ class CutlassMLABackend(FlashInferMLAAttnBackend):
                     self.cuda_graph_kv_indices.stride(0),
                     PAGE_SIZE,
                 )
-                workspace_size = cutlass_mla_get_workspace_size(
-                    max_seqlen_pad * PAGE_SIZE, bs
-                )
-                self.cuda_graph_mla_workspace = torch.empty(
-                    workspace_size, device="cuda", dtype=torch.uint8
-                )
                 self.forward_metadata = CutlassMLADecodeMetadata(
                     self.cuda_graph_mla_workspace,
                     self.cuda_graph_kv_indices[:bs, :max_seqlen_pad],
@@ -205,8 +199,7 @@ class CutlassMLABackend(FlashInferMLAAttnBackend):
         if forward_mode.is_decode_or_idle():
             assert seq_lens_cpu is not None
             seq_lens = seq_lens[:bs]
-
-            max_seqlen_pad = triton.cdiv(seq_lens_cpu.max().item(), PAGE_SIZE)
+
             create_flashmla_kv_indices_triton[(bs,)](
                 self.req_to_token,
                 req_pool_indices[:bs],
@@ -217,16 +210,6 @@ class CutlassMLABackend(FlashInferMLAAttnBackend):
                 self.cuda_graph_kv_indices.stride(0),
                 PAGE_SIZE,
             )
-            workspace_size = cutlass_mla_get_workspace_size(
-                max_seqlen_pad * PAGE_SIZE, bs
-            )
-            self.cuda_graph_mla_workspace = torch.empty(
-                workspace_size, device="cuda", dtype=torch.uint8
-            )
-            self.forward_metadata.workspace = self.cuda_graph_mla_workspace
-            self.forward_metadata.block_kv_indices = self.cuda_graph_kv_indices[
-                :bs, :max_seqlen_pad
-            ]
         else:
             super().init_forward_metadata_replay_cuda_graph(
                 bs,
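The cutlass hunks above stop re-allocating a workspace on every CUDA-graph call and instead take the padded sequence length from the indices buffer that was allocated once at capture time. A minimal standalone sketch of that pattern; the buffer name and sizes below are illustrative, not taken from the package:

import torch

# Illustrative buffer allocated once, as a CUDA-graph capture path would do.
MAX_BS, MAX_SEQLEN_PAD = 8, 64
cuda_graph_kv_indices = torch.zeros(MAX_BS, MAX_SEQLEN_PAD, dtype=torch.int32)

def replay_view(bs: int) -> torch.Tensor:
    # Mirrors `max_seqlen_pad = self.cuda_graph_kv_indices.shape[1]`: the padded
    # length comes from the pre-allocated buffer, so replay never re-allocates.
    max_seqlen_pad = cuda_graph_kv_indices.shape[1]
    return cuda_graph_kv_indices[:bs, :max_seqlen_pad]

print(replay_view(2).shape)  # torch.Size([2, 64])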
sglang/srt/layers/attention/flashattention_backend.py

@@ -11,6 +11,7 @@ from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode
 from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
+from sglang.srt.utils import get_compiler_backend

 if TYPE_CHECKING:
     from sglang.srt.layers.radix_attention import RadixAttention
@@ -918,8 +919,11 @@ class FlashAttentionBackend(AttentionBackend):
             and local_attn_metadata is not None
             and (hasattr(layer, "use_irope") and layer.use_irope)
         )
-
-
+
+        # When Spec Decode enabled, forward_decode would be called with two mode:
+        # 1. DRAFT_DECODE: we enable cascade attention when top_k > 1
+        # 2. IDLE: we don’t need cascade attention, spec_info will be none in this case
+        use_cascade_attn = forward_batch.spec_info is not None and self.topk > 1

         # Calculate window size (can be moved to metadata if layer properties don't change)
         # we don't do layer.sliding_window_size - 1 since in model.get_attention_sliding_window_size() we already - 1
@@ -1165,7 +1169,6 @@ class FlashAttentionBackend(AttentionBackend):
         max_virtual_batches = max_bs * (
             (max_seq_len + attn_chunk_size - 1) // attn_chunk_size
         )
-        max_blocks_per_seq = (max_seq_len + attn_chunk_size - 1) // attn_chunk_size
         max_pages_per_block = (attn_chunk_size + page_size - 1) // page_size

         self.decode_cuda_graph_local_attn_metadata = {
@@ -1177,7 +1180,7 @@ class FlashAttentionBackend(AttentionBackend):
             ),
             "local_block_table": torch.zeros(
                 max_virtual_batches,
-
+                max_pages_per_block,
                 dtype=torch.int32,
                 device=self.device,
             ),
@@ -1266,6 +1269,29 @@ class FlashAttentionBackend(AttentionBackend):
             ),
         }

+        self.draft_extend_metadata = {
+            "cache_seqlens": torch.zeros(
+                max_bs, dtype=torch.int32, device=self.device
+            ),
+            "cu_seqlens_q": torch.zeros(
+                max_bs + 1,
+                dtype=torch.int32,
+                device=self.device,
+            ),
+            "cu_seqlens_k": torch.zeros(
+                max_bs + 1, dtype=torch.int32, device=self.device
+            ),
+            "page_table": torch.zeros(
+                max_bs,
+                (self.max_context_len + self.page_size - 1) // self.page_size,
+                dtype=torch.int32,
+                device=self.device,
+            ),
+            "strided_indices": torch.arange(
+                0, self.max_context_len, self.page_size, device=self.device
+            ),
+        }
+
         if self.topk > 1:
             self.target_verify_metadata_topk_normal = {
                 "cache_seqlens": torch.zeros(
@@ -1435,19 +1461,7 @@ class FlashAttentionBackend(AttentionBackend):
             self.decode_cuda_graph_metadata[bs] = metadata

             if self.attention_chunk_size is not None:
-                metadata
-                    local_query_start_loc=self.decode_cuda_graph_local_attn_metadata[
-                        "local_query_start_loc"
-                    ],
-                    local_seqused_k=self.decode_cuda_graph_local_attn_metadata[
-                        "local_seqused_k"
-                    ],
-                    local_block_table=self.decode_cuda_graph_local_attn_metadata[
-                        "local_block_table"
-                    ],
-                    local_max_query_len=1,
-                    local_max_seq_len=1,
-                )
+                self._update_local_attn_metadata_for_capture(metadata, batch_size)

         elif forward_mode.is_target_verify():
             if self.topk <= 1:
@@ -1518,6 +1532,32 @@ class FlashAttentionBackend(AttentionBackend):

             self.target_verify_metadata_topk_normal[bs] = metadata
             self.target_verify_metadata_topk_expand[bs] = metadata_expand
+        elif forward_mode.is_draft_extend():
+            metadata.cache_seqlens_int32 = self.draft_extend_metadata["cache_seqlens"][
+                :bs
+            ]
+            metadata.cache_seqlens_int32.copy_(seq_lens.to(torch.int32))
+
+            num_tokens_per_bs = num_tokens // bs
+            metadata.max_seq_len_q = num_tokens_per_bs
+            metadata.max_seq_len_k = seq_lens.max().item()
+
+            metadata.cu_seqlens_q = torch.arange(
+                0,
+                bs * num_tokens_per_bs + 1,
+                num_tokens_per_bs,
+                dtype=torch.int32,
+                device=device,
+            )
+
+            metadata.cu_seqlens_k = self.draft_extend_metadata["cu_seqlens_k"][
+                : (bs + 1)
+            ]
+            metadata.page_table = self.draft_extend_metadata["page_table"][
+                req_pool_indices, :
+            ]
+
+            self.draft_extend_metadata[bs] = metadata

         if encoder_lens is not None:
             encoder_bs = encoder_lens.numel()
@@ -1618,30 +1658,22 @@ class FlashAttentionBackend(AttentionBackend):
                 )
                 # TODO: Handle local attention metadata for draft decode when llama4 eagle is supported
             else:
-                metadata = self.decode_cuda_graph_metadata[bs]
                 # Normal Decode
+                metadata = self.decode_cuda_graph_metadata[bs]
                 max_len = seq_lens_cpu.max().item()
+                max_seq_pages = (max_len + self.page_size - 1) // self.page_size
                 metadata.max_seq_len_k = max_len

-
-
-
-
+                normal_decode_set_medadata(
+                    metadata,
+                    self.req_to_token,
+                    req_pool_indices,
+                    self.decode_cuda_graph_metadata["strided_indices"],
+                    max_seq_pages,
+                    seq_lens,
+                    self.page_size,
                 )

-                max_seq_pages = (
-                    metadata.max_seq_len_k + self.page_size - 1
-                ) // self.page_size
-                page_indices = self.req_to_token[
-                    req_pool_indices[:, None],
-                    self.decode_cuda_graph_metadata["strided_indices"][:max_seq_pages][
-                        None, :
-                    ],
-                ]
-                page_indices //= self.page_size
-                metadata.page_table[:, :max_seq_pages].copy_(page_indices)
-                metadata.page_table[:, max_seq_pages:].fill_(0)
-
                 self._update_local_attn_metadata_for_replay(metadata, bs)
         elif forward_mode.is_target_verify():
             if self.topk <= 1:
@@ -1742,6 +1774,29 @@ class FlashAttentionBackend(AttentionBackend):
                 metadata_expand.max_seq_len_k = (
                     metadata_expand.cache_seqlens_int32.max().item()
                 )
+        elif forward_mode.is_draft_extend():
+            metadata = self.draft_extend_metadata[bs]
+            metadata.cache_seqlens_int32.copy_(seq_lens.to(torch.int32))
+
+            metadata.max_seq_len_k = seq_lens_cpu.max().item()
+            metadata.cu_seqlens_k[1:].copy_(
+                torch.cumsum(metadata.cache_seqlens_int32, dim=0, dtype=torch.int32)
+            )
+            accept_length = spec_info.accept_length[:bs]
+            metadata.max_seq_len_q = accept_length.max().item()
+            metadata.cu_seqlens_q[1:].copy_(
+                torch.cumsum(accept_length, dim=0, dtype=torch.int32)
+            )
+
+            max_seq_pages = (
+                metadata.max_seq_len_k + self.page_size - 1
+            ) // self.page_size
+            page_indices = self.req_to_token[
+                req_pool_indices[:, None],
+                self.draft_extend_metadata["strided_indices"][:max_seq_pages],
+            ]
+            page_indices //= self.page_size
+            metadata.page_table[:, :max_seq_pages].copy_(page_indices)

         if encoder_lens is not None:
             # Only support encoder size 1 for now
@@ -1808,6 +1863,62 @@ class FlashAttentionBackend(AttentionBackend):
         )
         metadata.local_attn_metadata = local_metadata

+    def _update_local_attn_metadata_for_capture(
+        self, metadata: FlashAttentionMetadata, bs: int
+    ):
+        """Update local attention metadata during CUDA graph capture phase.
+
+        This method calculates the exact buffer sizes needed for local attention metadata
+        during the CUDA graph capture phase, optimizing memory usage by creating views of
+        pre-allocated buffers with exactly the sizes needed.
+        """
+        seq_lens_capture = metadata.cache_seqlens_int32
+        max_seq_len = int(seq_lens_capture.max().item())
+        page_table_capture = metadata.page_table
+
+        cu_seqlens_q_np = metadata.cu_seqlens_q.cpu().numpy()
+        seqlens_np = seq_lens_capture.cpu().numpy()
+        (
+            seqlens_q_local_np,
+            cu_seqlens_q_local_np,
+            seqlens_k_local_np,
+            block_table_local_np,
+        ) = make_local_attention_virtual_batches(
+            self.attention_chunk_size,
+            cu_seqlens_q_np,
+            seqlens_np,
+            page_table_capture,
+            self.page_size,
+        )
+
+        # Get exact dimensions from the calculation
+        q_len = len(cu_seqlens_q_local_np)
+        k_len = len(seqlens_k_local_np)
+        b0 = block_table_local_np.shape[0] if block_table_local_np.shape[0] > 0 else bs
+        b1 = block_table_local_np.shape[1] if block_table_local_np.shape[1] > 0 else 1
+
+        # Create views of the pre-allocated buffers with exactly these sizes
+        # This is the key optimization - we only use the memory we actually need
+        local_query_start_loc = self.decode_cuda_graph_local_attn_metadata[
+            "local_query_start_loc"
+        ][:q_len]
+
+        local_seqused_k = self.decode_cuda_graph_local_attn_metadata["local_seqused_k"][
+            :k_len
+        ]
+
+        local_block_table = self.decode_cuda_graph_local_attn_metadata[
+            "local_block_table"
+        ][:b0, :b1]
+
+        metadata.local_attn_metadata = FlashAttentionMetadata.LocalAttentionMetadata(
+            local_query_start_loc=local_query_start_loc,
+            local_seqused_k=local_seqused_k,
+            local_block_table=local_block_table,
+            local_max_query_len=1,
+            local_max_seq_len=max_seq_len,
+        )
+
     def _update_local_attn_metadata_for_replay(
         self, metadata: FlashAttentionMetadata, bs: int
     ):
@@ -1945,3 +2056,23 @@ class FlashAttentionMultiStepBackend:
                 seq_lens_cpu=forward_batch.seq_lens_cpu,
                 out_cache_loc=forward_batch.out_cache_loc,
             )
+
+
+@torch.compile(dynamic=True, backend=get_compiler_backend())
+def normal_decode_set_medadata(
+    metadata,
+    req_to_token,
+    req_pool_indices,
+    strided_indices,
+    max_seq_pages,
+    seq_lens,
+    page_size,
+):
+    metadata.cache_seqlens_int32 = seq_lens.to(torch.int32)
+    metadata.cu_seqlens_k[1:].copy_(torch.cumsum(seq_lens, dim=0, dtype=torch.int32))
+    page_indices = req_to_token[
+        req_pool_indices[:, None],
+        strided_indices[:max_seq_pages][None, :],
+    ]
+    metadata.page_table[:, :max_seq_pages].copy_(page_indices // page_size)
+    metadata.page_table[:, max_seq_pages:].fill_(0)
sglang/srt/layers/attention/flashinfer_backend.py

@@ -25,6 +25,7 @@ from sglang.global_config import global_config
 from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
 from sglang.srt.layers.attention.utils import create_flashinfer_kv_indices_triton
 from sglang.srt.layers.dp_attention import get_attention_tp_size
+from sglang.srt.layers.utils import is_sm100_supported
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode
 from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
 from sglang.srt.utils import is_flashinfer_available, next_power_of_2
@@ -149,8 +150,11 @@ class FlashInferAttnBackend(AttentionBackend):
             for _ in range(self.num_wrappers)
         ]

+        fmha_backend = "auto"
+        if is_sm100_supported():
+            fmha_backend = "cutlass"
         self.prefill_wrapper_ragged = BatchPrefillWithRaggedKVCacheWrapper(
-            self.workspace_buffer, "NHD"
+            self.workspace_buffer, "NHD", backend=fmha_backend
         )

         # Two wrappers: one for sliding window attention and one for full attention.
@@ -358,6 +362,35 @@ class FlashInferAttnBackend(AttentionBackend):
             )
             self.prefill_cuda_graph_metadata[bs] = prefill_wrappers
             self.forward_metadata = PrefillMetadata(prefill_wrappers, False, False)
+        elif forward_mode.is_draft_extend():
+            prefill_wrappers = []
+            for i in range(self.num_wrappers):
+                prefill_wrappers.append(
+                    BatchPrefillWithPagedKVCacheWrapper(
+                        self.workspace_buffer,
+                        "NHD",
+                        backend="fa2",
+                        use_cuda_graph=True,
+                        qo_indptr_buf=self.cuda_graph_qo_indptr[i][: bs + 1],
+                        paged_kv_indptr_buf=self.kv_indptr[i][: bs + 1],
+                        paged_kv_indices_buf=self.cuda_graph_kv_indices[i],
+                        paged_kv_last_page_len_buf=self.kv_last_page_len[:bs],
+                    )
+                )
+
+            seq_lens_sum = seq_lens.sum().item()
+            self.indices_updater_prefill.update(
+                req_pool_indices,
+                seq_lens,
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrappers=prefill_wrappers,
+                use_ragged=False,
+                encoder_lens=encoder_lens,
+                spec_info=spec_info,
+            )
+            self.prefill_cuda_graph_metadata[bs] = prefill_wrappers
+            self.forward_metadata = PrefillMetadata(prefill_wrappers, False, False)
         else:
             raise ValueError(f"Invalid mode: {forward_mode=}")

@@ -392,6 +425,17 @@ class FlashInferAttnBackend(AttentionBackend):
                 encoder_lens=encoder_lens[:bs] if encoder_lens is not None else None,
                 spec_info=spec_info,
             )
+        elif forward_mode.is_draft_extend():
+            self.indices_updater_prefill.update(
+                req_pool_indices[:bs],
+                seq_lens[:bs],
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrappers=self.prefill_cuda_graph_metadata[bs],
+                use_ragged=False,
+                encoder_lens=encoder_lens[:bs] if encoder_lens is not None else None,
+                spec_info=spec_info,
+            )
         else:
             raise ValueError("Invalid forward mode")
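This backend (and the MLA variant below) now picks the ragged-prefill FMHA backend per device generation: "cutlass" on SM100-class GPUs, otherwise "auto". A hedged sketch of the kind of capability check presumably behind `is_sm100_supported()`; the compute-capability test below is an assumption, not the helper's actual code:

import torch

def pick_fmha_backend() -> str:
    # Assumption: SM100-class (compute capability 10.x) devices get the cutlass path.
    if torch.cuda.is_available():
        major, _ = torch.cuda.get_device_capability()
        if major == 10:
            return "cutlass"
    return "auto"

print(pick_fmha_backend())  # "cutlass" on SM100 hardware, otherwise "auto"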
sglang/srt/layers/attention/flashinfer_mla_backend.py

@@ -29,6 +29,7 @@ from sglang.srt.layers.attention.flashinfer_backend import (
     create_flashinfer_kv_indices_triton,
 )
 from sglang.srt.layers.dp_attention import get_attention_tp_size
+from sglang.srt.layers.utils import is_sm100_supported
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode
 from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
@@ -108,8 +109,11 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         else:
             self.q_indptr_decode = q_indptr_decode_buf

+        fmha_backend = "auto"
+        if is_sm100_supported():
+            fmha_backend = "cutlass"
         self.prefill_wrapper_ragged = BatchPrefillWithRaggedKVCacheWrapper(
-            self.workspace_buffer, "NHD"
+            self.workspace_buffer, "NHD", backend=fmha_backend
         )

         if not self.skip_prefill:
@@ -278,6 +282,28 @@ class FlashInferMLAAttnBackend(AttentionBackend):
             )
             self.prefill_cuda_graph_metadata[bs] = verify_wrapper
             self.forward_metadata = PrefillMetadata(verify_wrapper, False)
+        elif forward_mode.is_draft_extend():
+            draft_extend_wrapper = BatchMLAPagedAttentionWrapper(
+                self.workspace_buffer,
+                use_cuda_graph=True,
+                qo_indptr=self.cuda_graph_qo_indptr[: bs + 1],
+                kv_indptr=self.cuda_graph_kv_indptr[: bs + 1],
+                kv_indices=self.cuda_graph_kv_indices,
+                kv_len_arr=self.cuda_graph_kv_lens[:bs],
+                backend="auto",
+            )
+            seq_lens_sum = seq_lens.sum().item()
+            self.indices_updater_prefill.update(
+                req_pool_indices,
+                seq_lens,
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrapper_paged=draft_extend_wrapper,
+                use_ragged=False,
+                spec_info=spec_info,
+            )
+            self.prefill_cuda_graph_metadata[bs] = draft_extend_wrapper
+            self.forward_metadata = PrefillMetadata(draft_extend_wrapper, False)
         else:
             raise ValueError(f"Invalid mode: {forward_mode=}")

@@ -325,6 +351,16 @@ class FlashInferMLAAttnBackend(AttentionBackend):
                 use_ragged=False,
                 spec_info=spec_info,
             )
+        elif forward_mode.is_draft_extend():
+            self.indices_updater_prefill.update(
+                req_pool_indices[:bs],
+                seq_lens[:bs],
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrapper_paged=self.prefill_cuda_graph_metadata[bs],
+                use_ragged=False,
+                spec_info=spec_info,
+            )
         else:
             raise ValueError(f"Invalid forward mode: {forward_mode=}")

@@ -346,7 +382,6 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         cache_loc = forward_batch.out_cache_loc
         logits_soft_cap = layer.logit_cap
         prefill_wrapper_paged = self.forward_metadata.prefill_wrapper
-        k_buf = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id)

         # Save kv cache
         if save_kv_cache and k is not None:
@@ -381,6 +416,9 @@ class FlashInferMLAAttnBackend(AttentionBackend):
             )
         else:
             # mla paged prefill
+            k_buf = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id).to(
+                q.dtype
+            )
             if q_rope is None:
                 qall = q.view(-1, layer.tp_q_head_num, layer.head_dim)
                 q, q_rope = (
@@ -442,7 +480,9 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         q_nope = reshaped_q[:, :, : layer.v_head_dim]
         q_rope = reshaped_q[:, :, layer.v_head_dim :]

-        k_buffer = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id)
+        k_buffer = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id).to(
+            q.dtype
+        )

         o = q_nope.new_empty(q_nope.shape)
         # Direct call to run without the wrapper
@@ -467,7 +507,7 @@ class FlashInferMLAIndicesUpdaterDecode:
         self.qk_nope_head_dim = model_runner.model_config.qk_nope_head_dim
         self.qk_rope_head_dim = model_runner.model_config.qk_rope_head_dim
         self.scaling = model_runner.model_config.scaling
-        self.data_type = model_runner.
+        self.data_type = model_runner.dtype
         self.attn_backend = attn_backend

         # Buffers and wrappers
@@ -577,7 +617,7 @@ class FlashInferMLAIndicesUpdaterPrefill:
         self.qk_rope_head_dim = model_runner.model_config.qk_rope_head_dim
         self.v_head_dim = model_runner.model_config.v_head_dim
         self.scaling = model_runner.model_config.scaling
-        self.data_type = model_runner.
+        self.data_type = model_runner.dtype
         self.q_data_type = model_runner.dtype
         self.attn_backend = attn_backend
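The paged-prefill hunks above cast the key buffer to the query dtype before the MLA kernels run, which avoids a mismatch when the KV cache is stored in a different precision than the activations. A tiny sketch of that cast with invented tensors:

import torch

q = torch.randn(4, 16, dtype=torch.bfloat16)
k_buf = torch.randn(32, 16, dtype=torch.float16)  # stand-in for a cache kept in another dtype
k_buf = k_buf.to(q.dtype)                         # mirrors the added `.to(q.dtype)` calls
assert k_buf.dtype == q.dtype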