sglang 0.4.6.post5__py3-none-any.whl → 0.4.7.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/__init__.py +2 -0
- sglang/api.py +7 -0
- sglang/bench_offline_throughput.py +10 -4
- sglang/bench_one_batch_server.py +67 -11
- sglang/bench_serving.py +86 -75
- sglang/lang/backend/runtime_endpoint.py +24 -1
- sglang/lang/interpreter.py +40 -1
- sglang/lang/ir.py +27 -0
- sglang/math_utils.py +8 -0
- sglang/profiler.py +167 -0
- sglang/srt/_custom_ops.py +34 -0
- sglang/srt/configs/internvl.py +8 -12
- sglang/srt/configs/model_config.py +33 -1
- sglang/srt/constrained/base_grammar_backend.py +5 -2
- sglang/srt/constrained/llguidance_backend.py +9 -8
- sglang/srt/constrained/outlines_backend.py +5 -4
- sglang/srt/constrained/xgrammar_backend.py +18 -18
- sglang/srt/conversation.py +52 -8
- sglang/srt/custom_op.py +38 -3
- sglang/srt/debug_utils.py +74 -0
- sglang/srt/disaggregation/base/__init__.py +1 -1
- sglang/srt/disaggregation/base/conn.py +25 -11
- sglang/srt/disaggregation/common/__init__.py +5 -0
- sglang/srt/disaggregation/common/conn.py +407 -0
- sglang/srt/disaggregation/common/utils.py +42 -0
- sglang/srt/disaggregation/decode.py +261 -52
- sglang/srt/disaggregation/fake/__init__.py +1 -1
- sglang/srt/disaggregation/fake/conn.py +16 -9
- sglang/srt/disaggregation/kv_events.py +60 -5
- sglang/srt/disaggregation/launch_lb.py +140 -0
- sglang/srt/disaggregation/mini_lb.py +29 -48
- sglang/srt/disaggregation/mooncake/__init__.py +1 -1
- sglang/srt/disaggregation/mooncake/conn.py +446 -149
- sglang/srt/disaggregation/mooncake/transfer_engine.py +32 -16
- sglang/srt/disaggregation/nixl/__init__.py +6 -1
- sglang/srt/disaggregation/nixl/conn.py +134 -437
- sglang/srt/disaggregation/prefill.py +130 -43
- sglang/srt/disaggregation/utils.py +127 -86
- sglang/srt/distributed/device_communicators/pymscclpp.py +315 -0
- sglang/srt/distributed/parallel_state.py +52 -5
- sglang/srt/entrypoints/EngineBase.py +6 -0
- sglang/srt/entrypoints/engine.py +116 -5
- sglang/srt/entrypoints/http_server.py +28 -4
- sglang/srt/eplb_simulator/__init__.py +1 -0
- sglang/srt/eplb_simulator/reader.py +51 -0
- sglang/srt/function_call/base_format_detector.py +138 -86
- sglang/srt/function_call/deepseekv3_detector.py +54 -6
- sglang/srt/function_call/ebnf_composer.py +33 -19
- sglang/srt/function_call/function_call_parser.py +27 -0
- sglang/srt/function_call/llama32_detector.py +33 -14
- sglang/srt/function_call/mistral_detector.py +73 -26
- sglang/srt/function_call/pythonic_detector.py +86 -20
- sglang/srt/function_call/qwen25_detector.py +64 -10
- sglang/srt/function_call/utils.py +17 -0
- sglang/srt/hf_transformers_utils.py +4 -0
- sglang/srt/layers/activation.py +19 -0
- sglang/srt/layers/attention/aiter_backend.py +503 -125
- sglang/srt/layers/attention/base_attn_backend.py +4 -0
- sglang/srt/layers/attention/cutlass_mla_backend.py +40 -34
- sglang/srt/layers/attention/flashattention_backend.py +137 -63
- sglang/srt/layers/attention/flashinfer_backend.py +46 -3
- sglang/srt/layers/attention/flashinfer_mla_backend.py +59 -25
- sglang/srt/layers/attention/flashmla_backend.py +2 -10
- sglang/srt/layers/attention/intel_amx_backend.py +128 -0
- sglang/srt/layers/attention/tbo_backend.py +232 -0
- sglang/srt/layers/attention/torch_native_backend.py +3 -0
- sglang/srt/layers/attention/triton_backend.py +304 -65
- sglang/srt/layers/attention/triton_ops/decode_attention.py +2 -7
- sglang/srt/layers/attention/triton_ops/extend_attention.py +12 -4
- sglang/srt/layers/attention/vision.py +51 -24
- sglang/srt/layers/communicator.py +281 -197
- sglang/srt/layers/dp_attention.py +6 -5
- sglang/srt/layers/layernorm.py +30 -19
- sglang/srt/layers/linear.py +0 -4
- sglang/srt/layers/logits_processor.py +0 -12
- sglang/srt/layers/moe/cutlass_moe.py +170 -7
- sglang/srt/layers/moe/cutlass_moe_params.py +169 -0
- sglang/srt/layers/moe/ep_moe/kernels.py +33 -11
- sglang/srt/layers/moe/ep_moe/layer.py +136 -72
- sglang/srt/layers/moe/ep_moe/token_dispatcher.py +24 -45
- sglang/srt/layers/moe/fused_moe_native.py +4 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +221 -29
- sglang/srt/layers/moe/fused_moe_triton/layer.py +34 -4
- sglang/srt/layers/moe/topk.py +60 -26
- sglang/srt/layers/multimodal.py +3 -3
- sglang/srt/layers/pooler.py +56 -0
- sglang/srt/layers/quantization/__init__.py +3 -2
- sglang/srt/layers/quantization/blockwise_int8.py +3 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +5 -0
- sglang/srt/layers/quantization/deep_gemm_wrapper/__init__.py +1 -0
- sglang/srt/layers/quantization/{deep_gemm.py → deep_gemm_wrapper/compile_utils.py} +69 -127
- sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +32 -0
- sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +110 -0
- sglang/srt/layers/quantization/fp8.py +28 -23
- sglang/srt/layers/quantization/fp8_kernel.py +156 -75
- sglang/srt/layers/quantization/fp8_utils.py +250 -69
- sglang/srt/layers/quantization/modelopt_quant.py +334 -7
- sglang/srt/layers/quantization/moe_wna16.py +3 -0
- sglang/srt/layers/quantization/w8a8_fp8.py +3 -0
- sglang/srt/layers/quantization/w8a8_int8.py +3 -0
- sglang/srt/layers/radix_attention.py +2 -3
- sglang/srt/layers/rotary_embedding.py +6 -12
- sglang/srt/layers/sampler.py +80 -79
- sglang/srt/layers/utils.py +6 -0
- sglang/srt/lora/layers.py +12 -15
- sglang/srt/lora/lora.py +49 -5
- sglang/srt/lora/lora_manager.py +98 -39
- sglang/srt/lora/mem_pool.py +28 -21
- sglang/srt/lora/utils.py +17 -13
- sglang/srt/managers/cache_controller.py +2 -1
- sglang/srt/managers/data_parallel_controller.py +13 -5
- sglang/srt/managers/eplb_algorithms/__init__.py +63 -0
- sglang/srt/managers/eplb_algorithms/deepseek.py +223 -0
- sglang/srt/managers/{deepseek_eplb.py → eplb_algorithms/deepseek_vec.py} +5 -7
- sglang/srt/managers/eplb_manager.py +55 -14
- sglang/srt/managers/expert_distribution.py +220 -46
- sglang/srt/managers/expert_location.py +110 -56
- sglang/srt/managers/expert_location_dispatch.py +23 -6
- sglang/srt/managers/io_struct.py +43 -8
- sglang/srt/managers/mm_utils.py +88 -38
- sglang/srt/managers/multimodal_processors/base_processor.py +190 -18
- sglang/srt/managers/multimodal_processors/gemma3.py +4 -31
- sglang/srt/managers/multimodal_processors/internvl.py +4 -0
- sglang/srt/managers/multimodal_processors/kimi_vl.py +15 -34
- sglang/srt/managers/multimodal_processors/minicpm.py +2 -1
- sglang/srt/managers/multimodal_processors/phi4mm.py +87 -0
- sglang/srt/managers/multimodal_processors/qwen_vl.py +22 -64
- sglang/srt/managers/multimodal_processors/vila.py +85 -0
- sglang/srt/managers/schedule_batch.py +173 -38
- sglang/srt/managers/scheduler.py +376 -127
- sglang/srt/managers/tokenizer_manager.py +163 -19
- sglang/srt/managers/utils.py +0 -4
- sglang/srt/mem_cache/chunk_cache.py +1 -0
- sglang/srt/mem_cache/hiradix_cache.py +4 -2
- sglang/srt/mem_cache/memory_pool.py +111 -407
- sglang/srt/mem_cache/memory_pool_host.py +380 -0
- sglang/srt/mem_cache/radix_cache.py +36 -12
- sglang/srt/metrics/collector.py +9 -0
- sglang/srt/model_executor/cuda_graph_runner.py +191 -113
- sglang/srt/model_executor/expert_location_updater.py +157 -22
- sglang/srt/model_executor/forward_batch_info.py +52 -22
- sglang/srt/model_executor/model_runner.py +102 -62
- sglang/srt/model_loader/loader.py +8 -1
- sglang/srt/model_loader/utils.py +67 -1
- sglang/srt/models/bert.py +113 -13
- sglang/srt/models/deepseek_nextn.py +1 -1
- sglang/srt/models/deepseek_v2.py +623 -290
- sglang/srt/models/gemma3_causal.py +7 -0
- sglang/srt/models/gemma3_mm.py +19 -14
- sglang/srt/models/idefics2.py +342 -0
- sglang/srt/models/internvl.py +46 -102
- sglang/srt/models/kimi_vl.py +4 -4
- sglang/srt/models/llama.py +1 -1
- sglang/srt/models/minicpmo.py +2 -5
- sglang/srt/models/minicpmv.py +3 -295
- sglang/srt/models/phi4mm.py +512 -0
- sglang/srt/models/qwen2.py +38 -9
- sglang/srt/models/qwen2_5_vl.py +3 -9
- sglang/srt/models/qwen2_eagle.py +4 -1
- sglang/srt/models/qwen2_moe.py +58 -191
- sglang/srt/models/qwen2_vl.py +3 -9
- sglang/srt/models/qwen3.py +41 -10
- sglang/srt/models/qwen3_moe.py +230 -191
- sglang/srt/models/registry.py +9 -1
- sglang/srt/models/roberta.py +117 -9
- sglang/srt/models/transformers.py +291 -0
- sglang/srt/models/vila.py +305 -0
- sglang/srt/openai_api/adapter.py +248 -28
- sglang/srt/openai_api/protocol.py +68 -3
- sglang/srt/openai_api/utils.py +172 -0
- sglang/srt/operations.py +37 -2
- sglang/srt/operations_strategy.py +200 -24
- sglang/srt/sampling/sampling_batch_info.py +37 -1
- sglang/srt/sampling/sampling_params.py +4 -1
- sglang/srt/server_args.py +381 -209
- sglang/srt/speculative/build_eagle_tree.py +9 -9
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +12 -14
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +256 -0
- sglang/srt/speculative/eagle_utils.py +440 -200
- sglang/srt/speculative/eagle_worker.py +234 -63
- sglang/srt/two_batch_overlap.py +637 -0
- sglang/srt/utils.py +187 -7
- sglang/test/attention/test_prefix_chunk_info.py +2 -0
- sglang/test/runners.py +54 -10
- sglang/test/send_one.py +4 -0
- sglang/test/test_block_fp8.py +1 -0
- sglang/test/test_block_fp8_deep_gemm_blackwell.py +252 -0
- sglang/test/test_block_fp8_ep.py +1 -0
- sglang/test/test_cutlass_moe.py +3 -3
- sglang/test/test_fp4_moe.py +248 -0
- sglang/test/test_utils.py +82 -7
- sglang/utils.py +9 -0
- sglang/version.py +1 -1
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.post1.dist-info}/METADATA +17 -14
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.post1.dist-info}/RECORD +359 -321
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.post1.dist-info}/WHEEL +1 -1
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H200.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_L40S.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=96,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.post1.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.post1.dist-info}/top_level.txt +0 -0
sglang/srt/layers/attention/flashinfer_mla_backend.py

@@ -15,7 +15,6 @@ from functools import partial
 from typing import TYPE_CHECKING, Callable, Optional, Union
 
 import torch
-import triton
 
 if os.environ["SGLANG_ENABLE_TORCH_COMPILE"] == "1":
     import logging
@@ -29,10 +28,11 @@ from sglang.srt.layers.attention.flashinfer_backend import (
     create_flashinfer_kv_indices_triton,
 )
 from sglang.srt.layers.dp_attention import get_attention_tp_size
+from sglang.srt.layers.utils import is_sm100_supported
 from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode
 from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
-from sglang.srt.utils import is_flashinfer_available
+from sglang.srt.utils import is_flashinfer_available, next_power_of_2
 
 if TYPE_CHECKING:
     from sglang.srt.layers.radix_attention import RadixAttention
@@ -108,8 +108,11 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         else:
             self.q_indptr_decode = q_indptr_decode_buf
 
+        fmha_backend = "auto"
+        if is_sm100_supported():
+            fmha_backend = "cutlass"
         self.prefill_wrapper_ragged = BatchPrefillWithRaggedKVCacheWrapper(
-            self.workspace_buffer, "NHD"
+            self.workspace_buffer, "NHD", backend=fmha_backend
         )
 
         if not self.skip_prefill:
@@ -278,6 +281,28 @@ class FlashInferMLAAttnBackend(AttentionBackend):
             )
             self.prefill_cuda_graph_metadata[bs] = verify_wrapper
             self.forward_metadata = PrefillMetadata(verify_wrapper, False)
+        elif forward_mode.is_draft_extend():
+            draft_extend_wrapper = BatchMLAPagedAttentionWrapper(
+                self.workspace_buffer,
+                use_cuda_graph=True,
+                qo_indptr=self.cuda_graph_qo_indptr[: bs + 1],
+                kv_indptr=self.cuda_graph_kv_indptr[: bs + 1],
+                kv_indices=self.cuda_graph_kv_indices,
+                kv_len_arr=self.cuda_graph_kv_lens[:bs],
+                backend="auto",
+            )
+            seq_lens_sum = seq_lens.sum().item()
+            self.indices_updater_prefill.update(
+                req_pool_indices,
+                seq_lens,
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrapper_paged=draft_extend_wrapper,
+                use_ragged=False,
+                spec_info=spec_info,
+            )
+            self.prefill_cuda_graph_metadata[bs] = draft_extend_wrapper
+            self.forward_metadata = PrefillMetadata(draft_extend_wrapper, False)
         else:
             raise ValueError(f"Invalid mode: {forward_mode=}")
 
@@ -325,6 +350,16 @@ class FlashInferMLAAttnBackend(AttentionBackend):
                 use_ragged=False,
                 spec_info=spec_info,
             )
+        elif forward_mode.is_draft_extend():
+            self.indices_updater_prefill.update(
+                req_pool_indices[:bs],
+                seq_lens[:bs],
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrapper_paged=self.prefill_cuda_graph_metadata[bs],
+                use_ragged=False,
+                spec_info=spec_info,
+            )
         else:
             raise ValueError(f"Invalid forward mode: {forward_mode=}")
 
@@ -720,7 +755,7 @@ class FlashInferMLAMultiStepDraftBackend:
 
         if topk > 1:
             raise ValueError(
-
+                "Currently Flashinfer MLA only supports topk=1 for speculative decoding"
             )
         self.topk = topk
         self.speculative_num_steps = speculative_num_steps
@@ -754,6 +789,7 @@ class FlashInferMLAMultiStepDraftBackend:
 
         # Cached variables for generate_draft_decode_kv_indices
        self.pool_len = model_runner.req_to_token_pool.req_to_token.shape[1]
+        self.page_size = model_runner.server_args.page_size
 
     def common_template(
         self,
@@ -774,14 +810,13 @@ class FlashInferMLAMultiStepDraftBackend:
             kv_indices_buffer,
             self.kv_indptr,
             forward_batch.positions,
-            num_seqs,
-            self.topk,
             self.pool_len,
             kv_indices_buffer.shape[1],
             self.kv_indptr.shape[1],
-
-
-
+            next_power_of_2(num_seqs),
+            next_power_of_2(self.speculative_num_steps),
+            next_power_of_2(bs),
+            self.page_size,
         )
 
         assert forward_batch.spec_info is not None
@@ -884,19 +919,18 @@ def fast_mla_decode_plan(
     self._page_size = page_size
     self._sm_scale = sm_scale
 
-
-
-
-    self.
-
-
-
-
-
-
-
-
-
-
-        raise RuntimeError(f"Error in alternate MLA plan: {e}")
+    try:
+        # Standard version with just the required arguments (no use_profiler)
+        self._cached_module.plan.default(
+            self._float_workspace_buffer,
+            self._int_workspace_buffer,
+            self._pin_memory_int_workspace_buffer,
+            qo_indptr_cpu,
+            kv_indptr_cpu,
+            kv_len_arr_cpu,
+            num_heads,
+            head_dim_ckv,
+            causal,
+        )
+    except Exception as e:
+        raise RuntimeError(f"Error in alternate MLA plan: {e}")
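In the `common_template` change above, the draft-decode KV-indices kernel launch now receives sizes rounded up to powers of two (plus the page size) instead of the raw `num_seqs` / `self.topk` values. A minimal sketch of what a helper like `next_power_of_2` computes; the exact implementation in `sglang.srt.utils` may differ:

def next_power_of_2(n: int) -> int:
    # Smallest power of two that is >= n; Triton kernels typically want
    # constexpr block sizes rounded up this way.
    return 1 if n <= 0 else 1 << (n - 1).bit_length()

assert next_power_of_2(1) == 1
assert next_power_of_2(5) == 8
assert next_power_of_2(64) == 64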
sglang/srt/layers/attention/flashmla_backend.py

@@ -2,9 +2,6 @@ from __future__ import annotations
 
 """
 Support attention backend for FlashMLA.
-
-#TODO
-Enable speculative sampling in FlashMLA
 """
 
 from dataclasses import dataclass
@@ -14,8 +11,6 @@ import torch
 import triton
 from flash_mla import flash_mla_with_kvcache, get_mla_metadata
 
-from sglang.global_config import global_config
-from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
 from sglang.srt.layers.attention.flashinfer_mla_backend import FlashInferMLAAttnBackend
 from sglang.srt.layers.attention.utils import create_flashmla_kv_indices_triton
 from sglang.srt.layers.dp_attention import get_attention_tp_size
@@ -24,7 +19,6 @@ from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMo
 if TYPE_CHECKING:
     from sglang.srt.layers.radix_attention import RadixAttention
     from sglang.srt.model_executor.model_runner import ModelRunner
-    from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
     from sglang.srt.speculative.spec_info import SpecInfo
 
 
@@ -330,7 +324,7 @@ class FlashMLABackend(FlashInferMLAAttnBackend):
         )
 
     def get_cuda_graph_seq_len_fill_value(self):
-        return
+        return 1
 
     def forward_decode(
         self,
@@ -464,11 +458,9 @@ class FlashMLAMultiStepDraftBackend:
         topk: int,
         speculative_num_steps: int,
     ):
-        from sglang.srt.speculative.eagle_utils import generate_draft_decode_kv_indices
-
         if topk > 1:
             raise ValueError(
-
+                "Currently FlashMLA only supports topk=1 for speculative decoding"
             )
         self.topk = topk
         self.speculative_num_steps = speculative_num_steps
sglang/srt/layers/attention/intel_amx_backend.py (new file)

@@ -0,0 +1,128 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import torch
+
+from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
+from sglang.srt.model_executor.forward_batch_info import ForwardBatch
+
+if TYPE_CHECKING:
+    from sglang.srt.layers.radix_attention import RadixAttention
+    from sglang.srt.model_executor.model_runner import ModelRunner
+
+
+class IntelAMXAttnBackend(AttentionBackend):
+    def __init__(self, model_runner: ModelRunner):
+        import sgl_kernel
+
+        super().__init__()
+        self.forward_metadata = None
+        self.device = model_runner.device
+
+        self.num_head = (
+            model_runner.model_config.num_attention_heads // model_runner.tp_size
+        )
+
+        self.v_head_dim = model_runner.token_to_kv_pool.get_value_buffer(0).shape[-1]
+
+        self.decode_attention_fwd = torch.ops.sgl_kernel.decode_attention_cpu
+        self.extend_attention_fwd = torch.ops.sgl_kernel.extend_attention_cpu
+
+    def init_forward_metadata(self, forward_batch: ForwardBatch):
+        """Init the metadata for a forward pass."""
+
+        bs = forward_batch.batch_size
+        attn_logits = torch.zeros(
+            (
+                bs,
+                self.num_head,
+                8,  # self.num_kv_splits,
+                self.v_head_dim + 1,
+            ),
+            dtype=torch.float32,
+            device=self.device,
+        )
+        if forward_batch.forward_mode.is_decode_or_idle():
+            max_extend_len = None
+        else:
+            max_extend_len = torch.max(forward_batch.extend_seq_lens).item()
+        self.forward_metadata = (attn_logits, max_extend_len)
+
+    def forward_extend(
+        self,
+        q,
+        k,
+        v,
+        layer: RadixAttention,
+        forward_batch: ForwardBatch,
+        save_kv_cache=True,
+    ):
+        if layer.qk_head_dim != layer.v_head_dim:
+            o = q.new_empty((q.shape[0], layer.tp_q_head_num * layer.v_head_dim))
+        else:
+            o = torch.empty_like(q)
+
+        if save_kv_cache:
+            forward_batch.token_to_kv_pool.set_kv_buffer(
+                layer, forward_batch.out_cache_loc, k, v
+            )
+
+        _, max_extend_len = self.forward_metadata
+
+        self.extend_attention_fwd(
+            q.view(-1, layer.tp_q_head_num, layer.qk_head_dim),
+            k,
+            v,
+            o.view(-1, layer.tp_q_head_num, layer.v_head_dim),
+            forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id),
+            forward_batch.token_to_kv_pool.get_value_buffer(layer.layer_id),
+            forward_batch.req_to_token_pool.req_to_token,
+            forward_batch.req_pool_indices,
+            forward_batch.seq_lens,
+            forward_batch.extend_seq_lens,
+            forward_batch.extend_start_loc,
+            max_extend_len,
+            layer.scaling,
+            layer.logit_cap,
+        )
+        return o
+
+    def forward_decode(
+        self,
+        q: torch.Tensor,
+        k: torch.Tensor,
+        v: torch.Tensor,
+        layer: RadixAttention,
+        forward_batch: ForwardBatch,
+        save_kv_cache=True,
+    ):
+        attn_logits, _ = self.forward_metadata
+
+        q = q.reshape(-1, layer.tp_q_head_num * layer.qk_head_dim)
+
+        if layer.qk_head_dim != layer.v_head_dim:
+            o = q.new_empty((q.shape[0], layer.tp_q_head_num * layer.v_head_dim))
+        else:
+            o = torch.empty_like(q)
+
+        self.decode_attention_fwd(
+            q.view(-1, layer.tp_q_head_num, layer.qk_head_dim),
+            forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id),
+            forward_batch.token_to_kv_pool.get_value_buffer(layer.layer_id),
+            o.view(-1, layer.tp_q_head_num, layer.v_head_dim),
+            k,
+            v,
+            forward_batch.out_cache_loc,
+            attn_logits,
+            forward_batch.req_to_token_pool.req_to_token,
+            forward_batch.req_pool_indices,
+            forward_batch.seq_lens,
+            layer.scaling,
+            layer.logit_cap,
+        )
+
+        return o
+
+    def support_triton(self):
+        return False
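The new CPU backend above keeps its per-forward scratch state as an `(attn_logits, max_extend_len)` tuple. A small illustration of the buffer shape it allocates; the example sizes are chosen here for demonstration, and the meaning of the trailing `+ 1` slot (commonly a per-split log-sum-exp in split-KV decode kernels) is an assumption, since the diff only shows the shape:

import torch

# Illustrative sizes; num_kv_splits is hard-coded to 8 in the diff above.
bs, num_head, num_kv_splits, v_head_dim = 4, 32, 8, 128
attn_logits = torch.zeros(
    (bs, num_head, num_kv_splits, v_head_dim + 1),
    dtype=torch.float32,
)
print(attn_logits.shape)  # torch.Size([4, 32, 8, 129])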
sglang/srt/layers/attention/tbo_backend.py (new file)

@@ -0,0 +1,232 @@
+from typing import TYPE_CHECKING, Callable, List, Optional, Union
+
+import torch
+
+from sglang.srt import two_batch_overlap
+from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
+from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
+
+if TYPE_CHECKING:
+    from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode
+
+
+class TboAttnBackend(AttentionBackend):
+    def __init__(self, primary: AttentionBackend, children: List[AttentionBackend]):
+        super().__init__()
+        self.primary = primary
+        self.children = children
+
+    @classmethod
+    def init_new(cls, creator: Callable[[], AttentionBackend]):
+        return cls(
+            primary=creator(),
+            children=[creator() for _ in range(2)],
+        )
+
+    def init_forward_metadata(self, forward_batch: "ForwardBatch"):
+        self.primary.init_forward_metadata(forward_batch=forward_batch)
+        if forward_batch.tbo_children is not None:
+            for child, forward_batch_child in zip(
+                self.children, forward_batch.tbo_children, strict=True
+            ):
+                if forward_batch_child.batch_size > 0:
+                    child.init_forward_metadata(forward_batch=forward_batch_child)
+
+    def init_cuda_graph_state(self, max_bs: int):
+        self.primary.init_cuda_graph_state(max_bs=max_bs)
+        for item in self.children:
+            # TODO for children, maybe can provide *smaller* max_bs to optimize
+            item.init_cuda_graph_state(max_bs=max_bs)
+
+    def init_forward_metadata_capture_cuda_graph(
+        self,
+        bs: int,
+        num_tokens: int,
+        req_pool_indices: torch.Tensor,
+        seq_lens: torch.Tensor,
+        encoder_lens: Optional[torch.Tensor],
+        forward_mode: "ForwardMode",
+        spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]],
+    ):
+        self.primary.init_forward_metadata_capture_cuda_graph(
+            bs=bs,
+            num_tokens=num_tokens,
+            req_pool_indices=req_pool_indices,
+            seq_lens=seq_lens,
+            encoder_lens=encoder_lens,
+            forward_mode=forward_mode,
+            spec_info=spec_info,
+        )
+
+        self._init_forward_metadata_cuda_graph_children(
+            fn_name="init_forward_metadata_capture_cuda_graph",
+            bs=bs,
+            req_pool_indices=req_pool_indices,
+            seq_lens=seq_lens,
+            encoder_lens=encoder_lens,
+            forward_mode=forward_mode,
+            spec_info=spec_info,
+            capture_num_tokens=num_tokens,
+        )
+
+    def init_forward_metadata_replay_cuda_graph(
+        self,
+        bs: int,
+        req_pool_indices: torch.Tensor,
+        seq_lens: torch.Tensor,
+        seq_lens_sum: int,
+        encoder_lens: Optional[torch.Tensor],
+        forward_mode: "ForwardMode",
+        spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]],
+        seq_lens_cpu: Optional[torch.Tensor],
+    ):
+        self.primary.init_forward_metadata_replay_cuda_graph(
+            bs=bs,
+            req_pool_indices=req_pool_indices,
+            seq_lens=seq_lens,
+            seq_lens_sum=seq_lens_sum,
+            encoder_lens=encoder_lens,
+            forward_mode=forward_mode,
+            spec_info=spec_info,
+            seq_lens_cpu=seq_lens_cpu,
+        )
+
+        self._init_forward_metadata_cuda_graph_children(
+            fn_name="init_forward_metadata_replay_cuda_graph",
+            bs=bs,
+            req_pool_indices=req_pool_indices,
+            seq_lens=seq_lens,
+            encoder_lens=encoder_lens,
+            forward_mode=forward_mode,
+            spec_info=spec_info,
+            replay_seq_lens_sum=seq_lens_sum,
+            replay_seq_lens_cpu=seq_lens_cpu,
+        )
+
+    def _init_forward_metadata_cuda_graph_children(
+        self,
+        fn_name: str,
+        # common args
+        bs: int,
+        req_pool_indices: torch.Tensor,
+        seq_lens: torch.Tensor,
+        encoder_lens: Optional[torch.Tensor],
+        forward_mode: "ForwardMode",
+        spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]],
+        # capture args
+        capture_num_tokens: int = None,
+        # replay args
+        replay_seq_lens_sum: int = None,
+        replay_seq_lens_cpu: Optional[torch.Tensor] = None,
+    ):
+        if fn_name == "init_forward_metadata_capture_cuda_graph":
+            assert capture_num_tokens == bs, "Only support num_tokens==bs currently"
+        num_tokens = bs
+
+        tbo_split_seq_index, tbo_split_token_index = (
+            two_batch_overlap.compute_split_indices_for_cuda_graph_replay(
+                forward_mode=forward_mode,
+                cuda_graph_num_tokens=num_tokens,
+            )
+        )
+
+        num_tokens_child_left = tbo_split_token_index
+        num_tokens_child_right = num_tokens - tbo_split_token_index
+        bs_child_left = num_tokens_child_left
+        bs_child_right = num_tokens_child_right
+
+        assert (
+            num_tokens_child_left > 0 and num_tokens_child_right > 0
+        ), f"{num_tokens_child_left=} {num_tokens_child_right=} {forward_mode=} {num_tokens=}"
+
+        common_pre_split_args = dict(
+            fn_name=fn_name,
+            bs=bs,
+            req_pool_indices=req_pool_indices,
+            seq_lens=seq_lens,
+            encoder_lens=encoder_lens,
+            forward_mode=forward_mode,
+            spec_info=spec_info,
+            capture_num_tokens=capture_num_tokens,
+            replay_seq_lens_sum=replay_seq_lens_sum,
+            replay_seq_lens_cpu=replay_seq_lens_cpu,
+        )
+
+        args_left = _init_forward_metadata_cuda_graph_split(
+            output_bs=bs_child_left,
+            seq_slice=slice(None, tbo_split_seq_index),
+            **common_pre_split_args,
+        )
+        args_right = _init_forward_metadata_cuda_graph_split(
+            output_bs=bs_child_right,
+            seq_slice=slice(tbo_split_seq_index, None),
+            **common_pre_split_args,
+        )
+
+        child_left, child_right = self.children
+        getattr(child_left, fn_name)(**args_left)
+        getattr(child_right, fn_name)(**args_right)
+
+    def get_cuda_graph_seq_len_fill_value(self):
+        ans = self.primary.get_cuda_graph_seq_len_fill_value()
+        for child in self.children:
+            assert ans == child.get_cuda_graph_seq_len_fill_value()
+        return ans
+
+    def forward_extend(self, *args, **kwargs):
+        return self.primary.forward_extend(*args, **kwargs)
+
+    def forward_decode(self, *args, **kwargs):
+        return self.primary.forward_decode(*args, **kwargs)
+
+
+def _init_forward_metadata_cuda_graph_split(
+    fn_name: str,
+    seq_slice: slice,
+    output_bs: int,
+    # common args
+    bs: int,
+    req_pool_indices: torch.Tensor,
+    seq_lens: torch.Tensor,
+    encoder_lens: Optional[torch.Tensor],
+    forward_mode: "ForwardMode",
+    spec_info: Optional[Union[EagleDraftInput, EagleVerifyInput]],
+    # capture args
+    capture_num_tokens: int = None,
+    # replay args
+    replay_seq_lens_sum: int = None,
+    replay_seq_lens_cpu: Optional[torch.Tensor] = None,
+):
+    assert encoder_lens is None, "encoder_lens is not supported yet"
+    assert spec_info is None, "spec_info is not supported yet"
+
+    ans = dict(
+        bs=output_bs,
+        req_pool_indices=req_pool_indices[seq_slice],
+        seq_lens=seq_lens[seq_slice],
+        # directly forward
+        forward_mode=forward_mode,
+        # ignore
+        encoder_lens=None,
+        spec_info=None,
+    )
+
+    if fn_name == "init_forward_metadata_capture_cuda_graph":
+        assert capture_num_tokens == bs, "Only support num_tokens==bs currently"
+        ans.update(
+            dict(
+                num_tokens=output_bs,
+            )
+        )
+    elif fn_name == "init_forward_metadata_replay_cuda_graph":
+        output_seq_lens_cpu = replay_seq_lens_cpu[seq_slice]
+        ans.update(
+            dict(
+                seq_lens_sum=output_seq_lens_cpu.sum().item(),
+                seq_lens_cpu=output_seq_lens_cpu,
+            )
+        )
+    else:
+        raise NotImplementedError
+
+    return ans
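A hedged usage sketch of the new `TboAttnBackend` wrapper shown above: `init_new` builds one primary backend plus two children from the same factory, forward calls delegate to the primary, and metadata initialization also covers each non-empty tbo child. The dummy backend below is illustrative only; a real caller would pass a concrete backend constructor bound to the current ModelRunner, and whether a stub this minimal satisfies the base class is an assumption.

from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
from sglang.srt.layers.attention.tbo_backend import TboAttnBackend

class DummyBackend(AttentionBackend):
    # Minimal stand-in for a concrete backend (FlashInfer, Triton, ...).
    def init_forward_metadata(self, forward_batch):
        pass

    def forward_extend(self, *args, **kwargs):
        raise NotImplementedError

    def forward_decode(self, *args, **kwargs):
        raise NotImplementedError

    def get_cuda_graph_seq_len_fill_value(self):
        return 1

tbo = TboAttnBackend.init_new(DummyBackend)
assert isinstance(tbo.primary, DummyBackend)
assert len(tbo.children) == 2
# forward_extend / forward_decode simply delegate to tbo.primary.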