sglang 0.4.6.post4__py3-none-any.whl → 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_offline_throughput.py +16 -10
- sglang/bench_one_batch.py +5 -4
- sglang/bench_one_batch_server.py +86 -22
- sglang/bench_serving.py +197 -110
- sglang/compile_deep_gemm.py +4 -4
- sglang/lang/backend/runtime_endpoint.py +24 -1
- sglang/profiler.py +167 -0
- sglang/srt/_custom_ops.py +34 -0
- sglang/srt/configs/internvl.py +8 -12
- sglang/srt/configs/model_config.py +66 -29
- sglang/srt/constrained/base_grammar_backend.py +5 -2
- sglang/srt/constrained/llguidance_backend.py +9 -8
- sglang/srt/constrained/outlines_backend.py +5 -4
- sglang/srt/constrained/xgrammar_backend.py +18 -18
- sglang/srt/conversation.py +47 -9
- sglang/srt/custom_op.py +38 -3
- sglang/srt/debug_utils.py +74 -0
- sglang/srt/disaggregation/common/__init__.py +1 -0
- sglang/srt/disaggregation/common/conn.py +407 -0
- sglang/srt/disaggregation/decode.py +187 -134
- sglang/srt/disaggregation/decode_schedule_batch_mixin.py +142 -0
- sglang/srt/disaggregation/fake/conn.py +4 -13
- sglang/srt/disaggregation/kv_events.py +412 -0
- sglang/srt/disaggregation/launch_lb.py +140 -0
- sglang/srt/disaggregation/mini_lb.py +84 -70
- sglang/srt/disaggregation/mooncake/conn.py +441 -140
- sglang/srt/disaggregation/mooncake/transfer_engine.py +31 -14
- sglang/srt/disaggregation/nixl/conn.py +124 -442
- sglang/srt/disaggregation/prefill.py +128 -44
- sglang/srt/disaggregation/utils.py +154 -6
- sglang/srt/distributed/device_communicators/pymscclpp.py +315 -0
- sglang/srt/distributed/parallel_state.py +52 -5
- sglang/srt/distributed/utils.py +3 -3
- sglang/srt/entrypoints/EngineBase.py +11 -0
- sglang/srt/entrypoints/engine.py +129 -12
- sglang/srt/entrypoints/http_server.py +21 -6
- sglang/srt/entrypoints/http_server_engine.py +5 -2
- sglang/srt/function_call/base_format_detector.py +302 -0
- sglang/srt/function_call/core_types.py +34 -0
- sglang/srt/function_call/deepseekv3_detector.py +205 -0
- sglang/srt/function_call/ebnf_composer.py +248 -0
- sglang/srt/function_call/function_call_parser.py +202 -0
- sglang/srt/function_call/llama32_detector.py +93 -0
- sglang/srt/function_call/mistral_detector.py +131 -0
- sglang/srt/function_call/pythonic_detector.py +229 -0
- sglang/srt/function_call/qwen25_detector.py +121 -0
- sglang/srt/function_call/utils.py +52 -0
- sglang/srt/hf_transformers_utils.py +50 -7
- sglang/srt/layers/attention/aiter_backend.py +878 -0
- sglang/srt/layers/attention/base_attn_backend.py +4 -0
- sglang/srt/layers/attention/cutlass_mla_backend.py +2 -19
- sglang/srt/layers/attention/flashattention_backend.py +166 -35
- sglang/srt/layers/attention/flashinfer_backend.py +45 -1
- sglang/srt/layers/attention/flashinfer_mla_backend.py +45 -5
- sglang/srt/layers/attention/flashmla_backend.py +340 -78
- sglang/srt/layers/attention/intel_amx_backend.py +128 -0
- sglang/srt/layers/attention/tbo_backend.py +232 -0
- sglang/srt/layers/attention/torch_native_backend.py +3 -0
- sglang/srt/layers/attention/triton_backend.py +247 -5
- sglang/srt/layers/attention/triton_ops/extend_attention.py +12 -4
- sglang/srt/layers/attention/utils.py +2 -2
- sglang/srt/layers/attention/vision.py +1 -1
- sglang/srt/layers/communicator.py +517 -0
- sglang/srt/layers/dp_attention.py +6 -15
- sglang/srt/layers/layernorm.py +30 -19
- sglang/srt/layers/moe/cutlass_moe.py +370 -0
- sglang/srt/layers/moe/cutlass_moe_params.py +169 -0
- sglang/srt/layers/moe/ep_moe/kernels.py +60 -17
- sglang/srt/layers/moe/ep_moe/layer.py +195 -87
- sglang/srt/layers/moe/ep_moe/token_dispatcher.py +88 -8
- sglang/srt/layers/moe/fused_moe_native.py +4 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +220 -25
- sglang/srt/layers/moe/fused_moe_triton/layer.py +48 -4
- sglang/srt/layers/moe/topk.py +107 -24
- sglang/srt/layers/multimodal.py +70 -0
- sglang/srt/layers/quantization/__init__.py +10 -4
- sglang/srt/layers/quantization/blockwise_int8.py +3 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +5 -0
- sglang/srt/layers/quantization/deep_gemm.py +60 -59
- sglang/srt/layers/quantization/fp8.py +113 -18
- sglang/srt/layers/quantization/fp8_kernel.py +118 -66
- sglang/srt/layers/quantization/fp8_utils.py +165 -43
- sglang/srt/layers/quantization/gptq.py +298 -6
- sglang/srt/layers/quantization/int8_kernel.py +18 -5
- sglang/srt/layers/quantization/modelopt_quant.py +334 -7
- sglang/srt/layers/quantization/moe_wna16.py +3 -0
- sglang/srt/layers/quantization/qoq.py +244 -0
- sglang/srt/layers/quantization/w8a8_fp8.py +3 -0
- sglang/srt/layers/quantization/w8a8_int8.py +3 -0
- sglang/srt/layers/rotary_embedding.py +6 -12
- sglang/srt/layers/sampler.py +80 -79
- sglang/srt/layers/utils.py +6 -0
- sglang/srt/lora/layers.py +12 -15
- sglang/srt/lora/lora.py +49 -5
- sglang/srt/lora/lora_manager.py +20 -8
- sglang/srt/lora/mem_pool.py +24 -16
- sglang/srt/lora/utils.py +17 -13
- sglang/srt/managers/data_parallel_controller.py +13 -5
- sglang/srt/managers/eplb_algorithms/__init__.py +63 -0
- sglang/srt/managers/eplb_algorithms/deepseek.py +223 -0
- sglang/srt/managers/eplb_algorithms/deepseek_vec.py +276 -0
- sglang/srt/managers/eplb_manager.py +96 -0
- sglang/srt/managers/expert_distribution.py +878 -56
- sglang/srt/managers/expert_location.py +448 -0
- sglang/srt/managers/expert_location_dispatch.py +108 -0
- sglang/srt/managers/io_struct.py +29 -5
- sglang/srt/managers/mm_utils.py +355 -151
- sglang/srt/managers/multimodal_processors/base_processor.py +299 -42
- sglang/srt/managers/multimodal_processors/deepseek_vl_v2.py +6 -1
- sglang/srt/managers/multimodal_processors/gemma3.py +15 -17
- sglang/srt/managers/multimodal_processors/internvl.py +18 -5
- sglang/srt/managers/multimodal_processors/janus_pro.py +7 -1
- sglang/srt/managers/multimodal_processors/kimi_vl.py +14 -32
- sglang/srt/managers/multimodal_processors/llava.py +3 -3
- sglang/srt/managers/multimodal_processors/minicpm.py +27 -32
- sglang/srt/managers/multimodal_processors/mllama4.py +6 -0
- sglang/srt/managers/multimodal_processors/phi4mm.py +87 -0
- sglang/srt/managers/multimodal_processors/pixtral.py +9 -9
- sglang/srt/managers/multimodal_processors/qwen_vl.py +35 -35
- sglang/srt/managers/schedule_batch.py +185 -55
- sglang/srt/managers/schedule_policy.py +4 -5
- sglang/srt/managers/scheduler.py +389 -154
- sglang/srt/managers/session_controller.py +1 -1
- sglang/srt/managers/tokenizer_manager.py +231 -39
- sglang/srt/managers/utils.py +0 -4
- sglang/srt/mem_cache/base_prefix_cache.py +3 -0
- sglang/srt/mem_cache/chunk_cache.py +3 -1
- sglang/srt/mem_cache/hiradix_cache.py +4 -4
- sglang/srt/mem_cache/memory_pool.py +74 -52
- sglang/srt/mem_cache/multimodal_cache.py +45 -0
- sglang/srt/mem_cache/radix_cache.py +58 -5
- sglang/srt/metrics/collector.py +11 -2
- sglang/srt/mm_utils.py +10 -0
- sglang/srt/model_executor/cuda_graph_runner.py +87 -65
- sglang/srt/model_executor/expert_location_updater.py +557 -0
- sglang/srt/model_executor/forward_batch_info.py +39 -14
- sglang/srt/model_executor/model_runner.py +231 -101
- sglang/srt/model_loader/loader.py +10 -6
- sglang/srt/model_loader/utils.py +67 -1
- sglang/srt/models/clip.py +5 -1
- sglang/srt/models/deepseek_nextn.py +1 -1
- sglang/srt/models/deepseek_v2.py +732 -403
- sglang/srt/models/exaone.py +8 -3
- sglang/srt/models/gemma3_causal.py +7 -0
- sglang/srt/models/gemma3_mm.py +75 -33
- sglang/srt/models/idefics2.py +342 -0
- sglang/srt/models/kimi_vl.py +4 -4
- sglang/srt/models/llama.py +1 -1
- sglang/srt/models/llama4.py +10 -2
- sglang/srt/models/llava.py +26 -18
- sglang/srt/models/mimo_mtp.py +220 -0
- sglang/srt/models/minicpmo.py +7 -17
- sglang/srt/models/minicpmv.py +3 -295
- sglang/srt/models/mistral.py +71 -1
- sglang/srt/models/mllama.py +3 -3
- sglang/srt/models/phi4mm.py +512 -0
- sglang/srt/models/qwen2.py +133 -35
- sglang/srt/models/qwen2_5_vl.py +5 -3
- sglang/srt/models/qwen2_eagle.py +4 -1
- sglang/srt/models/qwen2_moe.py +206 -69
- sglang/srt/models/qwen2_vl.py +3 -3
- sglang/srt/models/qwen3.py +92 -19
- sglang/srt/models/qwen3_moe.py +457 -55
- sglang/srt/models/registry.py +9 -1
- sglang/srt/models/siglip.py +294 -0
- sglang/srt/models/transformers.py +291 -0
- sglang/srt/openai_api/adapter.py +114 -40
- sglang/srt/openai_api/protocol.py +37 -2
- sglang/srt/openai_api/utils.py +172 -0
- sglang/srt/operations.py +189 -0
- sglang/srt/operations_strategy.py +207 -0
- sglang/srt/sampling/sampling_batch_info.py +13 -1
- sglang/srt/sampling/sampling_params.py +2 -1
- sglang/srt/server_args.py +235 -38
- sglang/srt/speculative/build_eagle_tree.py +8 -8
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +8 -11
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +253 -0
- sglang/srt/speculative/eagle_utils.py +181 -90
- sglang/srt/speculative/eagle_worker.py +146 -21
- sglang/srt/two_batch_overlap.py +635 -0
- sglang/srt/utils.py +197 -19
- sglang/test/runners.py +16 -7
- sglang/test/send_one.py +4 -0
- sglang/test/test_cutlass_moe.py +278 -0
- sglang/test/test_fp4_moe.py +248 -0
- sglang/test/test_utils.py +81 -42
- sglang/utils.py +2 -2
- sglang/version.py +1 -1
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/METADATA +31 -19
- sglang-0.4.7.dist-info/RECORD +699 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/WHEEL +1 -1
- sglang/srt/function_call_parser.py +0 -858
- sglang/srt/platforms/interface.py +0 -371
- sglang-0.4.6.post4.dist-info/RECORD +0 -646
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H200.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_L40S.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=96,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/models/{xiaomi_mimo.py → mimo.py} +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.4.6.post4.dist-info → sglang-0.4.7.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py
@@ -0,0 +1,253 @@
+from __future__ import annotations
+
+import bisect
+from typing import TYPE_CHECKING, Callable
+
+import torch
+
+from sglang.srt.model_executor.cuda_graph_runner import (
+    CUDA_GRAPH_CAPTURE_FAILED_MSG,
+    CudaGraphRunner,
+    LogitsProcessorOutput,
+    get_batch_sizes_to_capture,
+    get_global_graph_memory_pool,
+    model_capture_mode,
+    set_global_graph_memory_pool,
+    set_torch_compile_config,
+)
+from sglang.srt.model_executor.forward_batch_info import (
+    CaptureHiddenMode,
+    ForwardBatch,
+    ForwardMode,
+)
+from sglang.srt.speculative.eagle_utils import EagleDraftInput, fast_topk
+
+if TYPE_CHECKING:
+    from sglang.srt.speculative.eagle_worker import EAGLEWorker
+
+
+class EAGLEDraftExtendCudaGraphRunner:
+    def __init__(self, eagle_worker: EAGLEWorker):
+        # Parse args
+        self.eagle_worker = eagle_worker
+        self.model_runner = model_runner = eagle_worker.model_runner
+        self.graphs = {}
+        self.output_buffers = {}
+        self.enable_torch_compile = model_runner.server_args.enable_torch_compile
+        self.disable_padding = model_runner.server_args.disable_cuda_graph_padding
+        self.tp_size = self.model_runner.tp_size
+        self.dp_size = model_runner.server_args.dp_size
+        self.speculative_num_steps = model_runner.server_args.speculative_num_steps
+        self.topk = model_runner.server_args.speculative_eagle_topk
+        self.capture_bs, self.compile_bs = get_batch_sizes_to_capture(model_runner)
+        self.padded_static_len = -1
+
+        # Attention backend
+        self.num_tokens_per_bs = self.speculative_num_steps + 1
+        self.max_bs = max(self.capture_bs)
+        self.max_num_token = self.max_bs * self.num_tokens_per_bs
+
+        self.eagle_worker.draft_extend_attn_backend.init_cuda_graph_state(
+            self.max_num_token
+        )
+        self.seq_len_fill_value = (
+            self.eagle_worker.draft_extend_attn_backend.get_cuda_graph_seq_len_fill_value()
+        )
+        self.seq_lens_cpu = torch.full(
+            (self.max_bs,), self.seq_len_fill_value, dtype=torch.int32
+        )
+
+        if self.enable_torch_compile:
+            set_torch_compile_config()
+
+        # Graph inputs
+        with torch.device("cuda"):
+            self.input_ids = torch.zeros((self.max_num_token,), dtype=torch.int64)
+            self.req_pool_indices = torch.zeros((self.max_bs,), dtype=torch.int32)
+            self.out_cache_loc = torch.ones((self.max_num_token,), dtype=torch.int64)
+            self.positions = torch.zeros((self.max_num_token,), dtype=torch.int64)
+
+            if self.eagle_worker.speculative_algorithm.is_eagle3():
+                self.hidden_states = torch.zeros(
+                    (
+                        self.max_num_token,
+                        self.model_runner.model_config.hidden_size * 3,
+                    ),
+                    dtype=self.model_runner.dtype,
+                )
+            else:
+                self.hidden_states = torch.zeros(
+                    (self.max_num_token, self.model_runner.model_config.hidden_size),
+                    dtype=self.model_runner.dtype,
+                )
+
+            self.seq_lens = torch.ones((self.max_bs,), dtype=torch.int32)
+            self.extend_seq_lens = torch.ones((self.max_bs,), dtype=torch.int32)
+            self.accept_length = (
+                torch.ones((self.max_bs,), dtype=torch.int32) * self.num_tokens_per_bs
+            )
+
+        # Capture
+        try:
+            with model_capture_mode():
+                self.capture()
+        except RuntimeError as e:
+            raise Exception(
+                f"Capture cuda graph failed: {e}\n{CUDA_GRAPH_CAPTURE_FAILED_MSG}"
+            )
+
+    def can_run(self, forward_batch: ForwardBatch):
+        batch_size = forward_batch.seq_lens.numel()
+
+        is_bs_supported = (
+            batch_size in self.graphs
+            if self.disable_padding
+            else batch_size <= self.max_bs
+        )
+
+        return is_bs_supported
+
+    def capture(self):
+        CudaGraphRunner.capture(self)
+
+    def capture_one_batch_size(self, bs: int, forward: Callable):
+        graph = torch.cuda.CUDAGraph()
+        stream = self.stream
+        num_tokens = bs * self.num_tokens_per_bs
+
+        # Graph inputs
+        input_ids = self.input_ids[:num_tokens]
+        req_pool_indices = self.req_pool_indices[:bs]
+        seq_lens = self.seq_lens[:bs]
+        extend_seq_lens = self.extend_seq_lens[:bs]
+        accept_length = self.accept_length[:bs]
+        out_cache_loc = self.out_cache_loc[:num_tokens]
+        positions = self.positions[:num_tokens]
+        hidden_states = self.hidden_states[:num_tokens]
+
+        spec_info = EagleDraftInput(
+            hidden_states=hidden_states,
+            accept_length=accept_length,
+        )
+        spec_info.positions = None
+
+        # Forward batch
+        forward_batch = ForwardBatch(
+            forward_mode=ForwardMode.DRAFT_EXTEND,
+            batch_size=bs,
+            input_ids=input_ids,
+            req_pool_indices=req_pool_indices,
+            seq_lens=seq_lens,
+            req_to_token_pool=self.model_runner.req_to_token_pool,
+            token_to_kv_pool=self.model_runner.token_to_kv_pool,
+            out_cache_loc=out_cache_loc,
+            seq_lens_sum=seq_lens.sum().item(),
+            return_logprob=False,
+            positions=positions,
+            spec_algorithm=self.model_runner.spec_algorithm,
+            spec_info=spec_info,
+            capture_hidden_mode=CaptureHiddenMode.LAST,
+            attn_backend=self.eagle_worker.draft_extend_attn_backend,
+            extend_seq_lens=extend_seq_lens,
+            padded_static_len=self.padded_static_len,
+        )
+
+        self.eagle_worker.draft_extend_attn_backend.init_forward_metadata_capture_cuda_graph(
+            bs=bs,
+            num_tokens=num_tokens,
+            req_pool_indices=req_pool_indices,
+            seq_lens=seq_lens,
+            encoder_lens=None,
+            forward_mode=ForwardMode.DRAFT_EXTEND,
+            spec_info=spec_info,
+        )
+
+        # Run and capture
+        def run_once():
+            # Backup two fields, which will be modified in-place in `draft_forward`.
+            output_cache_loc_backup = forward_batch.out_cache_loc
+            hidden_states_backup = forward_batch.spec_info.hidden_states
+
+            ret = self.eagle_worker.draft_model_runner.model.forward(
+                forward_batch.input_ids,
+                forward_batch.positions,
+                forward_batch,
+            )
+            probs = torch.softmax(ret.next_token_logits, dim=-1)
+            ret.topk_p, ret.topk_index = fast_topk(probs, self.topk, dim=-1)
+
+            forward_batch.out_cache_loc = output_cache_loc_backup
+            forward_batch.spec_info.hidden_states = hidden_states_backup
+            return ret
+
+        for _ in range(2):
+            torch.cuda.synchronize()
+            self.model_runner.tp_group.barrier()
+
+            run_once()
+
+        with torch.cuda.graph(
+            graph, pool=get_global_graph_memory_pool(), stream=stream
+        ):
+            out = run_once()
+
+        set_global_graph_memory_pool(graph.pool())
+        return graph, out
+
+    def replay(self, forward_batch: ForwardBatch):
+        assert forward_batch.out_cache_loc is not None
+        # batch_size and num_seqs can be different in case there are finished examples
+        # in the batch, which will not be counted as num_seqs
+        raw_bs = forward_batch.batch_size
+        num_tokens = forward_batch.input_ids.shape[0]
+
+        index = bisect.bisect_left(self.capture_bs, raw_bs)
+        bs = self.capture_bs[index]
+        if bs * self.num_tokens_per_bs != num_tokens:
+            self.seq_lens.fill_(1)
+            self.accept_length.fill_(1)
+            self.out_cache_loc.zero_()
+
+        # Common inputs
+        self.input_ids[:num_tokens].copy_(forward_batch.input_ids)
+        self.seq_lens[:raw_bs].copy_(forward_batch.seq_lens)
+        self.extend_seq_lens[:raw_bs].copy_(forward_batch.extend_seq_lens)
+        self.out_cache_loc[:num_tokens].copy_(forward_batch.out_cache_loc)
+        self.positions[:num_tokens].copy_(forward_batch.positions)
+        self.hidden_states[:num_tokens].copy_(forward_batch.spec_info.hidden_states)
+        self.accept_length[:raw_bs].copy_(forward_batch.spec_info.accept_length)
+        self.req_pool_indices[:raw_bs].copy_(forward_batch.req_pool_indices)
+
+        if forward_batch.seq_lens_cpu is not None:
+            if bs != raw_bs:
+                self.seq_lens_cpu.fill_(1)
+            self.seq_lens_cpu[:raw_bs].copy_(forward_batch.seq_lens_cpu)
+
+        if bs != raw_bs:
+            forward_batch.spec_info.accept_length = self.accept_length[:bs]
+            forward_batch.spec_info.positions = None
+
+        self.eagle_worker.draft_extend_attn_backend.init_forward_metadata_replay_cuda_graph(
+            bs=bs,
+            req_pool_indices=self.req_pool_indices,
+            seq_lens=self.seq_lens,
+            seq_lens_sum=forward_batch.seq_lens_sum + (bs - raw_bs),
+            encoder_lens=None,
+            forward_mode=ForwardMode.DRAFT_EXTEND,
+            spec_info=forward_batch.spec_info,
+            seq_lens_cpu=self.seq_lens_cpu,
+        )
+
+        # Replay
+        self.graphs[bs].replay()
+        out = self.output_buffers[bs]
+        if bs != raw_bs:
+            forward_batch.spec_info.accept_length = self.accept_length[:raw_bs]
+            out_copy = out
+            out = LogitsProcessorOutput(
+                next_token_logits=out.next_token_logits[:raw_bs],
+                hidden_states=out.hidden_states[:raw_bs],
+            )
+            out.topk_p = out_copy.topk_p[:raw_bs]
+            out.topk_index = out_copy.topk_index[:raw_bs]
+        return out
--- sglang/srt/speculative/eagle_utils.py
+++ sglang/srt/speculative/eagle_utils.py
@@ -1,24 +1,28 @@
 from __future__ import annotations
 
+import logging
 import os
+import time
 from dataclasses import dataclass
-from typing import
+from typing import List, Optional
 
 import torch
 import torch.nn.functional as F
 import triton
 import triton.language as tl
 
+from sglang.srt.constrained.base_grammar_backend import BaseGrammarObject
 from sglang.srt.layers.attention.utils import create_flashinfer_kv_indices_triton
 from sglang.srt.layers.logits_processor import LogitsProcessorOutput
+from sglang.srt.layers.sampler import apply_custom_logit_processor
 from sglang.srt.managers.schedule_batch import (
+    Req,
     ScheduleBatch,
     get_last_loc,
     global_server_args_dict,
 )
 from sglang.srt.mem_cache.memory_pool import TokenToKVPoolAllocator
-from sglang.srt.model_executor.forward_batch_info import CaptureHiddenMode
-from sglang.srt.speculative.build_eagle_tree import build_tree_kernel_efficient
+from sglang.srt.model_executor.forward_batch_info import CaptureHiddenMode, ForwardMode
 from sglang.srt.utils import fast_topk, is_cuda, is_hip, next_power_of_2
 
 if is_cuda():
@@ -31,15 +35,15 @@ if is_cuda():
 elif is_hip():
     from sgl_kernel import verify_tree_greedy
 
-if TYPE_CHECKING:
-    from sglang.srt.managers.schedule_batch import ScheduleBatch
-
-import logging
 
 logger = logging.getLogger(__name__)
 
 
+# Simulate acceptance length for benchmarking purposes
 SIMULATE_ACC_LEN = os.environ.get("SIMULATE_ACC_LEN")
+SIMULATE_ACC_METHOD = os.environ.get("SIMULATE_ACC_METHOD", "multinomial")
+
+TREE_TRAVERSE_TIME_THRESHOLD = 1  # TODO: set this properly
 
 
 @dataclass
@@ -82,32 +86,28 @@ class EagleDraftInput:
         batch: ScheduleBatch,
         speculative_num_steps: int,
     ):
-
-
-        batch.extend_lens = [x + 1 for x in accept_length_cpu]
+        batch.forward_mode = ForwardMode.DRAFT_EXTEND
+        batch.input_ids = self.verified_id
+        batch.extend_lens = [x + 1 for x in batch.spec_info.accept_length_cpu]
         batch.extend_num_tokens = sum(batch.extend_lens)
         batch.seq_lens = batch.spec_info.seq_lens_for_draft_extend
         batch.req_pool_indices = batch.spec_info.req_pool_indices_for_draft_extend
-
+        batch.return_logprob = False
 
-        self.
-        new_verified_id = torch.empty_like(self.accept_length, dtype=torch.int32)
+        self.capture_hidden_mode = CaptureHiddenMode.LAST
         self.accept_length.add_(1)
+        self.positions = torch.empty_like(batch.input_ids, dtype=torch.long)
+        self.verified_id = torch.empty_like(self.accept_length, dtype=torch.int32)
 
-
-
+        create_extend_after_decode_spec_info[(len(batch.seq_lens),)](
+            batch.input_ids,
             batch.seq_lens,
             self.accept_length,
-            torch.cumsum(self.accept_length, axis=0, dtype=torch.int),
             self.positions,
-
-            next_power_of_2(speculative_num_steps + 1),
+            self.verified_id,
+            next_power_of_2(max(speculative_num_steps + 1, len(batch.seq_lens))),
         )
 
-        batch.seq_lens_sum = sum(seq_lens_cpu)
-        batch.input_ids = self.verified_id
-        self.verified_id = new_verified_id
-
     def generate_attn_arg_prefill(
         self,
         req_pool_indices: torch.Tensor,
@@ -123,8 +123,9 @@ class EagleDraftInput:
         cum_kv_seq_len = torch.zeros((bs + 1,), dtype=torch.int32, device="cuda")
         cum_kv_seq_len[1:] = torch.cumsum(paged_kernel_lens, dim=0)
 
-
-
+        kv_indices = torch.empty(
+            paged_kernel_lens_sum, dtype=torch.int32, device="cuda"
+        )
 
         create_flashinfer_kv_indices_triton[(bs,)](
             req_to_token,
@@ -184,54 +185,13 @@ class EagleVerifyInput:
     retrive_next_token: torch.Tensor
     retrive_next_sibling: torch.Tensor
     retrive_cum_len: torch.Tensor
-    draft_token_num: int
     spec_steps: int
+    topk: int
+    draft_token_num: int
     capture_hidden_mode: CaptureHiddenMode
-
-
-
-        cls,
-        verified_id: torch.Tensor,
-        score_list: List[torch.Tensor],
-        token_list: List[torch.Tensor],
-        parents_list: List[torch.Tensor],
-        seq_lens: torch.Tensor,
-        seq_lens_sum: int,
-        topk: int,
-        spec_steps: int,
-        num_verify_tokens: int,
-    ):
-        (
-            tree_mask,
-            position,
-            retrive_index,
-            retrive_next_token,
-            retrive_next_sibling,
-            draft_tokens,
-        ) = build_tree_kernel_efficient(
-            verified_id,
-            score_list,
-            token_list,
-            parents_list,
-            seq_lens,
-            seq_lens_sum,
-            topk,
-            spec_steps,
-            num_verify_tokens,
-        )
-
-        return cls(
-            draft_tokens,
-            tree_mask,
-            position,
-            retrive_index,
-            retrive_next_token,
-            retrive_next_sibling,
-            None,
-            num_verify_tokens,
-            spec_steps,
-            CaptureHiddenMode.FULL,
-        )
+    seq_lens_sum: int
+    seq_lens_cpu: torch.Tensor
+    grammar: BaseGrammarObject = None
 
     def prepare_for_verify(self, batch: ScheduleBatch, page_size: int):
         batch.input_ids = self.draft_token
@@ -307,6 +267,7 @@ class EagleVerifyInput:
|
|
307
267
|
logits_output: torch.Tensor,
|
308
268
|
token_to_kv_pool_allocator: TokenToKVPoolAllocator,
|
309
269
|
page_size: int,
|
270
|
+
vocab_mask: Optional[torch.Tensor] = None,
|
310
271
|
) -> torch.Tensor:
|
311
272
|
"""
|
312
273
|
Verify and find accepted tokens based on logits output and batch
|
@@ -343,6 +304,13 @@ class EagleVerifyInput:
|
|
343
304
|
torch.repeat_interleave(linear_penalty, self.draft_token_num, dim=0)
|
344
305
|
)
|
345
306
|
|
307
|
+
# Apply grammar mask
|
308
|
+
if vocab_mask is not None:
|
309
|
+
assert self.grammar is not None
|
310
|
+
self.grammar.apply_vocab_mask(
|
311
|
+
logits=logits_output.next_token_logits, vocab_mask=vocab_mask
|
312
|
+
)
|
313
|
+
|
346
314
|
# Sample tokens
|
347
315
|
if batch.sampling_info.is_all_greedy:
|
348
316
|
target_predict = torch.argmax(logits_output.next_token_logits, dim=-1)
|
```diff
@@ -440,6 +408,15 @@ class EagleVerifyInput:
                         break
                     else:
                         new_accept_index_.append(idx)
+                        # update grammar state
+                        if req.grammar is not None:
+                            try:
+                                req.grammar.accept_token(id)
+                            except ValueError as e:
+                                logger.info(
+                                    f"{i=}, {req=}\n" f"{accept_index=}\n" f"{predict=}\n"
+                                )
+                                raise e
                 if not req.finished():
                     new_accept_index.extend(new_accept_index_)
                     unfinished_index.append(i)
```
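Each token the verifier accepts is also fed to the request's grammar matcher, and a token the grammar rejects surfaces as a `ValueError` that is logged with the surrounding verification state and re-raised. A toy stand-in for the contract this code relies on (illustrative only; the real interface is `BaseGrammarObject` from sglang/srt/constrained/base_grammar_backend.py):

```python
class ToyGrammar:
    """Hypothetical stand-in showing the accept/rollback contract used above."""

    def __init__(self, allowed: list[set[int]]):
        self.allowed = allowed  # allowed token ids at each position
        self.pos = 0

    def accept_token(self, token_id: int) -> None:
        # Mirrors the behavior the verifier depends on: an illegal token raises.
        if token_id not in self.allowed[self.pos]:
            raise ValueError(f"token {token_id} not allowed at position {self.pos}")
        self.pos += 1

    def rollback(self, n: int) -> None:
        # Used by the tree traversal at the end of this diff to undo
        # speculative accept_token calls when backtracking.
        self.pos -= n
```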
```diff
@@ -544,26 +521,28 @@ class EagleVerifyInput:
 
 
 @triton.jit
-def create_extend_spec_info(
+def create_extend_after_decode_spec_info(
     verified_id,
-
-
-    accept_len_cum,
+    seq_lens,
+    accept_lens,
     positions,
     new_verified_id,
-
+    bs_upper: tl.constexpr,
 ):
     pid = tl.program_id(axis=0)
-
-    seq_length = tl.load(
-    accept_length = tl.load(
-
-
-
-
-
-
-
+    offsets = tl.arange(0, bs_upper)
+    seq_length = tl.load(seq_lens + pid)
+    accept_length = tl.load(accept_lens + pid)
+
+    accept_len_cumsum = tl.sum(
+        tl.load(accept_lens + offsets, mask=offsets < pid, other=0)
+    )
+    positions_ptr = positions + accept_len_cumsum
+    mask = offsets < accept_length
+    tl.store(positions_ptr + offsets, seq_length - accept_length + offsets, mask)
+
+    accept_len_cumsum += accept_length - 1
+    verified_id_data = tl.load(verified_id + accept_len_cumsum)
     tl.store(new_verified_id + pid, verified_id_data)
```
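The rewritten kernel takes raw `accept_lens` and computes each request's write offset in-kernel with a masked load plus `tl.sum`, instead of consuming a precomputed `accept_len_cum`. A NumPy restatement of what one program instance (`pid`) computes:

```python
import numpy as np

def reference(verified_id, seq_lens, accept_lens):
    """NumPy restatement of create_extend_after_decode_spec_info, one pid per request."""
    positions = np.empty(accept_lens.sum(), dtype=np.int64)
    new_verified_id = np.empty(len(seq_lens), dtype=verified_id.dtype)
    for pid in range(len(seq_lens)):
        start = accept_lens[:pid].sum()  # the masked tl.load + tl.sum prefix sum
        n = accept_lens[pid]
        # positions of the accepted tokens inside their sequence
        positions[start:start + n] = np.arange(seq_lens[pid] - n, seq_lens[pid])
        # the last accepted token of this request becomes the next verified id
        new_verified_id[pid] = verified_id[start + n - 1]
    return positions, new_verified_id
```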
```diff
@@ -584,8 +563,8 @@ def assign_req_to_token_pool(
     token_pool = req_to_token + tl.load(req_pool_indices + pid) * pool_len
 
     length_offset = tl.arange(0, bs_upper)
-    start = tl.load(start_offset + length_offset, mask=length_offset < pid)
-    end = tl.load(end_offset + length_offset, mask=length_offset < pid)
+    start = tl.load(start_offset + length_offset, mask=length_offset < pid, other=0)
+    end = tl.load(end_offset + length_offset, mask=length_offset < pid, other=0)
     out_offset = tl.sum(end - start, axis=0)
 
     out_cache_ptr = out_cache_loc + out_offset
```
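The `other=0` arguments added here, and in the `generate_draft_decode_kv_indices` and `align_evict_mask_to_page_size` hunks that follow, are correctness fixes: Triton leaves masked-off lanes of `tl.load` undefined unless `other=` supplies a fill value, so a following `tl.sum` can fold garbage into the reduction. A self-contained example of the fixed pattern (hypothetical kernel, CUDA required):

```python
import torch
import triton
import triton.language as tl


@triton.jit
def exclusive_prefix_sum(x_ptr, out_ptr, BLOCK: tl.constexpr):
    pid = tl.program_id(axis=0)
    offs = tl.arange(0, BLOCK)
    # Without other=0, lanes where offs >= pid hold undefined values and
    # tl.sum would fold that garbage into the reduction.
    vals = tl.load(x_ptr + offs, mask=offs < pid, other=0)
    tl.store(out_ptr + pid, tl.sum(vals))


x = torch.ones(8, dtype=torch.int32, device="cuda")
out = torch.empty_like(x)
exclusive_prefix_sum[(8,)](x, out, BLOCK=8)
# out == tensor([0, 1, 2, 3, 4, 5, 6, 7], ...)
```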
```diff
@@ -666,7 +645,7 @@ def generate_draft_decode_kv_indices(
         iters += 1
 
     load_offset = tl.arange(0, bs_upper)
-    seq_lens = tl.load(paged_kernel_lens + load_offset, mask=load_offset < bid)
+    seq_lens = tl.load(paged_kernel_lens + load_offset, mask=load_offset < bid, other=0)
    seq_len = tl.load(paged_kernel_lens + bid)
     cum_seq_len = tl.sum(seq_lens)
 
```
```diff
@@ -695,7 +674,7 @@ def generate_draft_decode_kv_indices(
     zid = bid * topk + topk_id
     if zid == 0:
         zid = num_seqs * topk
-    positions = tl.load(positions + bs_offset, mask=bs_offset < zid)
+    positions = tl.load(positions + bs_offset, mask=bs_offset < zid, other=0)
     base = tl.sum(positions)
     tl.store(kv_indptr + zid, base + zid * iters)
 
```
```diff
@@ -713,7 +692,9 @@ def align_evict_mask_to_page_size(
     bid = tl.program_id(axis=0)
     seq_len = tl.load(seq_lens + bid)
     io_mask = t_range < num_draft_tokens
-    mask_row = tl.load(evict_mask + bid * num_draft_tokens + t_range, mask=io_mask)
+    mask_row = tl.load(
+        evict_mask + bid * num_draft_tokens + t_range, mask=io_mask, other=0
+    )
 
     num_trues = tl.sum(mask_row)
     num_false = num_draft_tokens - num_trues
```
```diff
@@ -801,3 +782,113 @@ def _generate_simulated_accept_index(
     accept_length.fill_(simulate_acc_len - 1)
     predict.fill_(100) # some legit token id
     return sim_accept_index
+
+
+def traverse_tree(
+    retrieve_next_token: torch.Tensor,
+    retrieve_next_sibling: torch.Tensor,
+    draft_tokens: torch.Tensor,
+    grammar: BaseGrammarObject,
+    allocate_token_bitmask: torch.Tensor,
+):
+    """
+    Traverse the tree constructed by the draft model to generate the logits mask.
+    """
+    assert (
+        retrieve_next_token.shape == retrieve_next_sibling.shape == draft_tokens.shape
+    )
+
+    allocate_token_bitmask.fill_(0)
+
+    def dfs(
+        curr: int,
+        retrieve_next_token: torch.Tensor,
+        retrieve_next_sibling: torch.Tensor,
+        parent_pos: int,
+    ):
+        if curr == 0:
+            # the first token generated by the target model, and thus it is always
+            # accepted from the previous iteration
+            accepted = True
+        else:
+            parent_bitmask = allocate_token_bitmask[parent_pos]
+            curr_token_id = draft_tokens[curr]
+            # 32 boolean bitmask values are packed into 32-bit integers
+            accepted = (
+                parent_bitmask[curr_token_id // 32] & (1 << (curr_token_id % 32))
+            ) != 0
+
+        if accepted:
+            if curr != 0:
+                # Accept the current token
+                grammar.accept_token(draft_tokens[curr])
+            if not grammar.is_terminated():
+                # Generate the bitmask for the current token
+                grammar.fill_vocab_mask(allocate_token_bitmask, curr)
+                if retrieve_next_token[curr] != -1:
+                    # Visit the child node
+                    dfs(
+                        retrieve_next_token[curr],
+                        retrieve_next_token,
+                        retrieve_next_sibling,
+                        curr,
+                    )
+
+            if curr != 0:
+                # Rollback the current token
+                grammar.rollback(1)
+
+        if retrieve_next_sibling[curr] != -1:
+            # Visit the sibling node
+            dfs(
+                retrieve_next_sibling[curr],
+                retrieve_next_token,
+                retrieve_next_sibling,
+                parent_pos,
+            )
+
+    dfs(0, retrieve_next_token, retrieve_next_sibling, -1)
+
+
+def generate_token_bitmask(
+    reqs: List[Req],
+    verify_input: EagleVerifyInput,
+    retrieve_next_token_cpu: torch.Tensor,
+    retrieve_next_sibling_cpu: torch.Tensor,
+    draft_tokens_cpu: torch.Tensor,
+    vocab_size: int,
+):
+    """
+    Generate the logit mask for structured output.
+    Draft model's token can be either valid or invalid with respect to the grammar.
+    We need to perform DFS to figure out:
+    1. which tokens are accepted by the grammar
+    2. what is the corresponding logit mask.
+    """
+
+    num_draft_tokens = draft_tokens_cpu.shape[-1]
+
+    allocate_token_bitmask = None
+    assert len(reqs) == retrieve_next_token_cpu.shape[0]
+    grammar = None
+    for i, req in enumerate(reqs):
+        if req.grammar is not None:
+            if allocate_token_bitmask is None:
+                allocate_token_bitmask = req.grammar.allocate_vocab_mask(
+                    vocab_size=vocab_size,
+                    batch_size=draft_tokens_cpu.numel(),
+                    device="cpu",
+                )
+            grammar = req.grammar
+            traverse_tree(
+                retrieve_next_token_cpu[i],
+                retrieve_next_sibling_cpu[i],
+                draft_tokens_cpu[i],
+                req.grammar,
+                allocate_token_bitmask[
+                    i * num_draft_tokens : (i + 1) * num_draft_tokens
+                ],
+            )
+
+    verify_input.grammar = grammar
+    return allocate_token_bitmask
```