sglang 0.4.6.post5__py3-none-any.whl → 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_offline_throughput.py +10 -4
- sglang/bench_one_batch_server.py +67 -11
- sglang/bench_serving.py +85 -74
- sglang/lang/backend/runtime_endpoint.py +24 -1
- sglang/profiler.py +167 -0
- sglang/srt/_custom_ops.py +34 -0
- sglang/srt/configs/internvl.py +8 -12
- sglang/srt/configs/model_config.py +27 -1
- sglang/srt/constrained/base_grammar_backend.py +5 -2
- sglang/srt/constrained/llguidance_backend.py +9 -8
- sglang/srt/constrained/outlines_backend.py +5 -4
- sglang/srt/constrained/xgrammar_backend.py +18 -18
- sglang/srt/conversation.py +46 -8
- sglang/srt/custom_op.py +38 -3
- sglang/srt/debug_utils.py +74 -0
- sglang/srt/disaggregation/common/__init__.py +1 -0
- sglang/srt/disaggregation/common/conn.py +407 -0
- sglang/srt/disaggregation/decode.py +67 -3
- sglang/srt/disaggregation/fake/conn.py +1 -0
- sglang/srt/disaggregation/kv_events.py +60 -5
- sglang/srt/disaggregation/launch_lb.py +140 -0
- sglang/srt/disaggregation/mini_lb.py +29 -48
- sglang/srt/disaggregation/mooncake/conn.py +432 -140
- sglang/srt/disaggregation/mooncake/transfer_engine.py +32 -16
- sglang/srt/disaggregation/nixl/conn.py +124 -432
- sglang/srt/disaggregation/prefill.py +2 -0
- sglang/srt/disaggregation/utils.py +38 -1
- sglang/srt/distributed/device_communicators/pymscclpp.py +315 -0
- sglang/srt/distributed/parallel_state.py +52 -5
- sglang/srt/entrypoints/EngineBase.py +6 -0
- sglang/srt/entrypoints/engine.py +102 -5
- sglang/srt/entrypoints/http_server.py +15 -2
- sglang/srt/function_call/base_format_detector.py +138 -86
- sglang/srt/function_call/deepseekv3_detector.py +54 -6
- sglang/srt/function_call/ebnf_composer.py +33 -19
- sglang/srt/function_call/function_call_parser.py +27 -0
- sglang/srt/function_call/llama32_detector.py +33 -14
- sglang/srt/function_call/mistral_detector.py +73 -26
- sglang/srt/function_call/pythonic_detector.py +86 -20
- sglang/srt/function_call/qwen25_detector.py +64 -10
- sglang/srt/function_call/utils.py +17 -0
- sglang/srt/hf_transformers_utils.py +4 -0
- sglang/srt/layers/attention/aiter_backend.py +488 -123
- sglang/srt/layers/attention/base_attn_backend.py +4 -0
- sglang/srt/layers/attention/cutlass_mla_backend.py +2 -19
- sglang/srt/layers/attention/flashattention_backend.py +103 -18
- sglang/srt/layers/attention/flashinfer_backend.py +45 -1
- sglang/srt/layers/attention/flashinfer_mla_backend.py +37 -1
- sglang/srt/layers/attention/intel_amx_backend.py +128 -0
- sglang/srt/layers/attention/tbo_backend.py +232 -0
- sglang/srt/layers/attention/torch_native_backend.py +3 -0
- sglang/srt/layers/attention/triton_backend.py +244 -5
- sglang/srt/layers/attention/triton_ops/extend_attention.py +12 -4
- sglang/srt/layers/communicator.py +260 -194
- sglang/srt/layers/dp_attention.py +6 -5
- sglang/srt/layers/layernorm.py +30 -19
- sglang/srt/layers/moe/cutlass_moe.py +170 -7
- sglang/srt/layers/moe/cutlass_moe_params.py +169 -0
- sglang/srt/layers/moe/ep_moe/kernels.py +27 -6
- sglang/srt/layers/moe/ep_moe/layer.py +94 -40
- sglang/srt/layers/moe/ep_moe/token_dispatcher.py +13 -8
- sglang/srt/layers/moe/fused_moe_native.py +4 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +220 -25
- sglang/srt/layers/moe/fused_moe_triton/layer.py +34 -4
- sglang/srt/layers/moe/topk.py +44 -18
- sglang/srt/layers/multimodal.py +3 -3
- sglang/srt/layers/quantization/__init__.py +3 -2
- sglang/srt/layers/quantization/blockwise_int8.py +3 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +5 -0
- sglang/srt/layers/quantization/deep_gemm.py +55 -56
- sglang/srt/layers/quantization/fp8.py +28 -23
- sglang/srt/layers/quantization/fp8_kernel.py +118 -66
- sglang/srt/layers/quantization/fp8_utils.py +165 -49
- sglang/srt/layers/quantization/modelopt_quant.py +334 -7
- sglang/srt/layers/quantization/moe_wna16.py +3 -0
- sglang/srt/layers/quantization/w8a8_fp8.py +3 -0
- sglang/srt/layers/quantization/w8a8_int8.py +3 -0
- sglang/srt/layers/rotary_embedding.py +6 -12
- sglang/srt/layers/sampler.py +80 -79
- sglang/srt/layers/utils.py +6 -0
- sglang/srt/lora/layers.py +12 -15
- sglang/srt/lora/lora.py +49 -5
- sglang/srt/lora/lora_manager.py +19 -5
- sglang/srt/lora/mem_pool.py +24 -16
- sglang/srt/lora/utils.py +17 -13
- sglang/srt/managers/data_parallel_controller.py +13 -5
- sglang/srt/managers/eplb_algorithms/__init__.py +63 -0
- sglang/srt/managers/eplb_algorithms/deepseek.py +223 -0
- sglang/srt/managers/{deepseek_eplb.py → eplb_algorithms/deepseek_vec.py} +5 -7
- sglang/srt/managers/eplb_manager.py +55 -14
- sglang/srt/managers/expert_distribution.py +220 -46
- sglang/srt/managers/expert_location.py +110 -56
- sglang/srt/managers/expert_location_dispatch.py +23 -6
- sglang/srt/managers/io_struct.py +15 -4
- sglang/srt/managers/mm_utils.py +88 -38
- sglang/srt/managers/multimodal_processors/base_processor.py +188 -16
- sglang/srt/managers/multimodal_processors/gemma3.py +4 -31
- sglang/srt/managers/multimodal_processors/internvl.py +4 -0
- sglang/srt/managers/multimodal_processors/kimi_vl.py +15 -34
- sglang/srt/managers/multimodal_processors/minicpm.py +2 -1
- sglang/srt/managers/multimodal_processors/phi4mm.py +87 -0
- sglang/srt/managers/multimodal_processors/qwen_vl.py +22 -64
- sglang/srt/managers/schedule_batch.py +140 -38
- sglang/srt/managers/scheduler.py +305 -112
- sglang/srt/managers/tokenizer_manager.py +134 -17
- sglang/srt/managers/utils.py +0 -4
- sglang/srt/metrics/collector.py +9 -0
- sglang/srt/model_executor/cuda_graph_runner.py +72 -61
- sglang/srt/model_executor/expert_location_updater.py +157 -22
- sglang/srt/model_executor/forward_batch_info.py +38 -17
- sglang/srt/model_executor/model_runner.py +96 -56
- sglang/srt/model_loader/utils.py +67 -1
- sglang/srt/models/deepseek_nextn.py +1 -1
- sglang/srt/models/deepseek_v2.py +609 -234
- sglang/srt/models/gemma3_causal.py +7 -0
- sglang/srt/models/gemma3_mm.py +19 -14
- sglang/srt/models/idefics2.py +342 -0
- sglang/srt/models/kimi_vl.py +4 -4
- sglang/srt/models/llama.py +1 -1
- sglang/srt/models/minicpmo.py +2 -5
- sglang/srt/models/minicpmv.py +3 -295
- sglang/srt/models/phi4mm.py +512 -0
- sglang/srt/models/qwen2.py +38 -9
- sglang/srt/models/qwen2_5_vl.py +3 -9
- sglang/srt/models/qwen2_eagle.py +4 -1
- sglang/srt/models/qwen2_moe.py +58 -191
- sglang/srt/models/qwen2_vl.py +3 -9
- sglang/srt/models/qwen3.py +41 -10
- sglang/srt/models/qwen3_moe.py +230 -191
- sglang/srt/models/registry.py +9 -1
- sglang/srt/models/transformers.py +291 -0
- sglang/srt/openai_api/adapter.py +86 -24
- sglang/srt/openai_api/protocol.py +31 -2
- sglang/srt/openai_api/utils.py +172 -0
- sglang/srt/operations.py +37 -2
- sglang/srt/operations_strategy.py +200 -24
- sglang/srt/sampling/sampling_batch_info.py +13 -1
- sglang/srt/sampling/sampling_params.py +2 -1
- sglang/srt/server_args.py +114 -27
- sglang/srt/speculative/build_eagle_tree.py +8 -8
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +8 -11
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +253 -0
- sglang/srt/speculative/eagle_utils.py +51 -91
- sglang/srt/speculative/eagle_worker.py +101 -21
- sglang/srt/two_batch_overlap.py +635 -0
- sglang/srt/utils.py +129 -7
- sglang/test/runners.py +16 -7
- sglang/test/send_one.py +4 -0
- sglang/test/test_cutlass_moe.py +3 -3
- sglang/test/test_fp4_moe.py +248 -0
- sglang/test/test_utils.py +79 -6
- sglang/version.py +1 -1
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.dist-info}/METADATA +14 -11
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.dist-info}/RECORD +318 -291
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.dist-info}/WHEEL +1 -1
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1024,device_name=NVIDIA_H200.json → triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json → triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json → triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=1280,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=2560,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=320,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=64,N=640,device_name=NVIDIA_H200.json → triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=14336,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=1792,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=2048,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=3584,device_name=NVIDIA_L40S.json → triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=4096,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI300X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Instinct_MI325X.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=AMD_Radeon_Graphics.json → triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=7168,device_name=NVIDIA_H200.json → triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json → triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=192,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=384,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=768,device_name=NVIDIA_H200.json → triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=128,N=96,device_name=NVIDIA_H20.json → triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- /sglang/srt/layers/moe/fused_moe_triton/configs/{E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json → triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json} +0 -0
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.4.6.post5.dist-info → sglang-0.4.7.dist-info}/top_level.txt +0 -0
sglang/srt/layers/communicator.py
@@ -14,7 +14,8 @@

 from dataclasses import dataclass
 from enum import Enum, auto
-from
+from functools import partial
+from typing import Dict, Optional

 import torch.distributed

@@ -36,10 +37,23 @@ from sglang.srt.model_executor.forward_batch_info import ForwardBatch


 class ScatterMode(Enum):
+    """
+    Suppose we have TP=4, DP=2, enable-dp-attention, and the system handles seq a,b,c,d
+    Model input/output: [ab, ab, cd, cd] for four ranks respectively
+    SCATTERED: [a, b, c, d]
+    TP_ATTN_FULL: [ab, ab, cd, cd], i.e. all ranks inside a TP attn group have full data of the group
+    FULL: [abcd, abcd, abcd, abcd]
+    """
+
     SCATTERED = auto()
     TP_ATTN_FULL = auto()
     FULL = auto()

+    @staticmethod
+    def model_input_output():
+        """The scatter mode for model forward pass input and output data"""
+        return ScatterMode.TP_ATTN_FULL
+

 @dataclass
 class _LayerModeComputationContext:
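A reading aid for the ScatterMode docstring added above (not part of the diff): a minimal standalone sketch that spells out the same TP=4, DP=2 example, assuming ranks 0-1 and ranks 2-3 form the two attention-TP groups, with sequences a, b owned by the first group and c, d by the second.

```python
def layouts_for_rank(rank: int) -> dict:
    # Ranks 0-1 serve sequences a, b; ranks 2-3 serve c, d (one attn-TP group each).
    group = ["a", "b"] if rank < 2 else ["c", "d"]
    rank_in_group = rank % 2  # position of this rank inside its 2-way attn-TP group
    return {
        "SCATTERED": [group[rank_in_group]],  # disjoint 1/attn_tp_size slice per rank
        "TP_ATTN_FULL": group,                # every rank holds its whole group's tokens
        "FULL": ["a", "b", "c", "d"],         # every rank holds all tokens across DP groups
    }


if __name__ == "__main__":
    for rank in range(4):
        print(rank, layouts_for_rank(rank))
```

Running it prints SCATTERED = [a], [b], [c], [d] across the four ranks, TP_ATTN_FULL = [a, b] on ranks 0-1 and [c, d] on ranks 2-3, and FULL = [a, b, c, d] everywhere, matching the docstring.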
@@ -81,7 +95,7 @@ class LayerScatterModes:
     @classmethod
     def _compute_layer_input_mode(cls, context: _LayerModeComputationContext):
         if context.layer_id == 0:
-            return ScatterMode.
+            return ScatterMode.model_input_output()
         return cls._compute_layer_output_mode(context.previous_layer())

     @classmethod
@@ -112,7 +126,7 @@
     def _compute_layer_output_mode(cls, context: _LayerModeComputationContext):
         mlp_mode = cls._compute_mlp_mode(context)
         if context.layer_id == context.num_layers - 1:
-            return ScatterMode.
+            return ScatterMode.model_input_output()
         if mlp_mode == ScatterMode.SCATTERED:
             return ScatterMode.SCATTERED
         if mlp_mode == ScatterMode.FULL:
@@ -135,15 +149,29 @@ class LayerCommunicator:
         self.input_layernorm = input_layernorm
         self.post_attention_layernorm = post_attention_layernorm

-        self.
-        self.
-
-
-
-
-
-
-
+        self._context = CommunicateContext.init_new()
+        self._communicate_simple_fn = CommunicateSimpleFn.get_fn(
+            input_mode=self.layer_scatter_modes.layer_input_mode,
+            output_mode=self.layer_scatter_modes.attn_mode,
+            context=self._context,
+        )
+        self._communicate_with_all_reduce_and_layer_norm_fn = (
+            CommunicateWithAllReduceAndLayerNormFn.get_fn(
+                hidden_states_input_mode=self.layer_scatter_modes.attn_mode,
+                residual_input_mode=self.layer_scatter_modes.layer_input_mode,
+                hidden_states_output_mode=self.layer_scatter_modes.mlp_mode,
+                residual_output_mode=self.layer_scatter_modes.middle_residual_mode,
+                context=self._context,
+            )
+        )
+        self._communicate_summable_tensor_pair_fn = (
+            CommunicateSummableTensorPairFn.get_fn(
+                hidden_states_input_mode=self.layer_scatter_modes.mlp_mode,
+                residual_input_mode=self.layer_scatter_modes.middle_residual_mode,
+                output_mode=self.layer_scatter_modes.layer_output_mode,
+                context=self._context,
+            )
+        )

     def prepare_attn(
         self,
@@ -160,12 +188,10 @@ class LayerCommunicator:
         else:
             hidden_states, residual = self.input_layernorm(hidden_states, residual)

-        hidden_states =
+        hidden_states = self._communicate_simple_fn(
             hidden_states=hidden_states,
             forward_batch=forward_batch,
-
-            output_mode=self.layer_scatter_modes.attn_mode,
-            context=self._compute_context(forward_batch),
+            context=self._context,
         )

         return hidden_states, residual
@@ -176,16 +202,12 @@ class LayerCommunicator:
         residual: torch.Tensor,
         forward_batch: ForwardBatch,
     ):
-        return
+        return self._communicate_with_all_reduce_and_layer_norm_fn(
             hidden_states=hidden_states,
             residual=residual,
             forward_batch=forward_batch,
-            hidden_states_input_mode=self.layer_scatter_modes.attn_mode,
-            residual_input_mode=self.layer_scatter_modes.layer_input_mode,
-            hidden_states_output_mode=self.layer_scatter_modes.mlp_mode,
-            residual_output_mode=self.layer_scatter_modes.middle_residual_mode,
             layernorm=self.post_attention_layernorm,
-            context=self.
+            context=self._context,
         )

     def postprocess_layer(
@@ -194,58 +216,16 @@ class LayerCommunicator:
         residual: torch.Tensor,
         forward_batch: ForwardBatch,
     ):
-        return
+        return self._communicate_summable_tensor_pair_fn(
             hidden_states=hidden_states,
             residual=residual,
             forward_batch=forward_batch,
-
-            residual_input_mode=self.layer_scatter_modes.middle_residual_mode,
-            output_mode=self.layer_scatter_modes.layer_output_mode,
-            context=self._compute_context(forward_batch),
+            context=self._context,
         )

-    def _compute_context(self, forward_batch: ForwardBatch):
-        return _Context(
-            num_tokens_of_mode=_compute_num_tokens_of_mode(
-                forward_batch,
-                attn_tp_rank=self.attn_tp_rank,
-                attn_tp_size=self.attn_tp_size,
-            ),
-            process_group_sizes=self.process_group_sizes,
-            attn_tp_rank=self.attn_tp_rank,
-            attn_tp_size=self.attn_tp_size,
-            local_attn_dp_size=self.local_attn_dp_size,
-            tp_size=self.tp_size,
-        )
-
-
-def _compute_num_tokens_of_mode(
-    forward_batch: ForwardBatch, attn_tp_rank: int, attn_tp_size: int
-):
-    tp_attn_full_num_tokens = forward_batch.input_ids.shape[0]
-    return {
-        ScatterMode.SCATTERED: _torch_tensor_split_len(
-            tp_attn_full_num_tokens, attn_tp_size, attn_tp_rank
-        ),
-        ScatterMode.TP_ATTN_FULL: tp_attn_full_num_tokens,
-        ScatterMode.FULL: (
-            forward_batch.gathered_buffer.shape[0]
-            if global_server_args_dict["enable_dp_attention"]
-            else forward_batch.input_ids.shape[0]
-        ),
-    }
-
-
-def _torch_tensor_split_len(tensor_len: int, n: int, output_index: int):
-    if output_index < int(tensor_len % n):
-        return int(tensor_len / n) + 1
-    else:
-        return int(tensor_len / n)
-

 @dataclass
-class
-    num_tokens_of_mode: Dict["ScatterMode", int]
+class CommunicateContext:
     process_group_sizes: Dict["ScatterMode", int]
     attn_tp_rank: int
     attn_tp_size: int
@@ -255,75 +235,82 @@ class _Context:
     def is_same_group_size(self, a: "ScatterMode", b: "ScatterMode"):
         return self.process_group_sizes[a] == self.process_group_sizes[b]

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    @classmethod
+    def init_new(cls):
+        attn_tp_rank = get_attention_tp_rank()
+        attn_tp_size = get_attention_tp_size()
+        local_attn_dp_size = get_local_attention_dp_size()
+        tp_size = get_tensor_model_parallel_world_size()
+        process_group_sizes = {
+            ScatterMode.SCATTERED: 1,
+            ScatterMode.TP_ATTN_FULL: attn_tp_size,
+            ScatterMode.FULL: tp_size,
+        }
+        return cls(
+            process_group_sizes=process_group_sizes,
+            attn_tp_rank=attn_tp_rank,
+            attn_tp_size=attn_tp_size,
+            local_attn_dp_size=local_attn_dp_size,
+            tp_size=tp_size,
         )


-
-
-
-
-
-
-    )
-    def _inner():
-        nonlocal hidden_states
-
+class CommunicateSimpleFn:
+    @staticmethod
+    def get_fn(
+        input_mode: ScatterMode,
+        output_mode: ScatterMode,
+        context: CommunicateContext,
+    ):
         if context.is_same_group_size(input_mode, output_mode):
-            return
+            return CommunicateSimpleFn._trivial

         if (input_mode == ScatterMode.SCATTERED) and (
             output_mode == ScatterMode.TP_ATTN_FULL
         ):
-
-                forward_batch.gathered_buffer[: forward_batch.input_ids.shape[0]],
-                hidden_states,
-            )
-            attn_tp_all_gather(
-                list(hidden_states.tensor_split(context.attn_tp_size)),
-                local_hidden_states,
-            )
-            return hidden_states
+            return CommunicateSimpleFn._scattered_to_tp_attn_full

         raise NotImplementedError(f"{input_mode=} {output_mode=}")

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    @staticmethod
+    def _trivial(
+        hidden_states: torch.Tensor,
+        forward_batch: ForwardBatch,
+        context: CommunicateContext,
+    ) -> torch.Tensor:
+        return hidden_states
+
+    @staticmethod
+    def _scattered_to_tp_attn_full(
+        hidden_states: torch.Tensor,
+        forward_batch: ForwardBatch,
+        context: CommunicateContext,
+    ) -> torch.Tensor:
+        hidden_states, local_hidden_states = (
+            forward_batch.gathered_buffer[: forward_batch.input_ids.shape[0]],
+            hidden_states,
+        )
+        attn_tp_all_gather(
+            list(hidden_states.tensor_split(context.attn_tp_size)),
+            local_hidden_states,
+        )
+        return hidden_states
+
+
+class CommunicateWithAllReduceAndLayerNormFn:
     """Besides communication, needs to
     1. All reduce in tp_attn_group on hidden_states
     2. Apply layer norm
     """

-
-
+    @staticmethod
+    def get_fn(
+        hidden_states_input_mode: ScatterMode,
+        residual_input_mode: ScatterMode,
+        hidden_states_output_mode: ScatterMode,
+        residual_output_mode: ScatterMode,
+        context: CommunicateContext,
+    ):

         if (
             context.is_same_group_size(
@@ -332,10 +319,7 @@ def _communicate_with_all_reduce_and_layer_norm(
             and context.is_same_group_size(residual_input_mode, residual_output_mode)
             and context.attn_tp_size == 1
         ):
-
-            if hidden_states.shape[0] != 0:
-                hidden_states, residual = layernorm(hidden_states, residual)
-            return hidden_states, residual
+            return CommunicateWithAllReduceAndLayerNormFn._simple

         if (
             (hidden_states_input_mode == ScatterMode.TP_ATTN_FULL)
@@ -343,21 +327,7 @@ def _communicate_with_all_reduce_and_layer_norm(
             and (hidden_states_output_mode == ScatterMode.FULL)
             and (residual_output_mode == ScatterMode.TP_ATTN_FULL)
         ):
-
-            if context.attn_tp_rank == 0:
-                hidden_states += residual
-            hidden_states, local_hidden_states = (
-                forward_batch.gathered_buffer,
-                hidden_states,
-            )
-            dp_gather_partial(hidden_states, local_hidden_states, forward_batch)
-            dp_scatter(residual, hidden_states, forward_batch)
-            if hidden_states.shape[0] != 0:
-                hidden_states = layernorm(hidden_states)
-            else:
-                hidden_states = tensor_model_parallel_all_reduce(hidden_states)
-            hidden_states, residual = layernorm(hidden_states, residual)
-            return hidden_states, residual
+            return CommunicateWithAllReduceAndLayerNormFn._gather_hidden_states

         if (
             (hidden_states_input_mode == ScatterMode.TP_ATTN_FULL)
@@ -367,85 +337,181 @@ def _communicate_with_all_reduce_and_layer_norm(
             and (hidden_states_output_mode == ScatterMode.SCATTERED)
             and (residual_output_mode == ScatterMode.SCATTERED)
         ):
-
-
-
-
-            residual = residual.tensor_split(context.attn_tp_size)[
-                context.attn_tp_rank
-            ]
-            if hidden_states.shape[0] != 0:
-                hidden_states, residual = layernorm(hidden_states, residual)
-            return hidden_states, residual
+            return partial(
+                CommunicateWithAllReduceAndLayerNormFn._scatter_hidden_states_and_residual,
+                residual_input_mode=residual_input_mode,
+            )

         raise NotImplementedError(
             f"{hidden_states_input_mode=} {residual_input_mode=} {residual_output_mode=} {residual_output_mode=}"
         )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    @staticmethod
+    def _simple(
+        hidden_states: torch.Tensor,
+        residual: torch.Tensor,
+        forward_batch: ForwardBatch,
+        layernorm: torch.nn.Module,
+        context: CommunicateContext,
+    ):
+        # TODO move these `if shape != 0` into LayerNorm itself
+        if hidden_states.shape[0] != 0:
+            hidden_states, residual = layernorm(hidden_states, residual)
+        return hidden_states, residual
+
+    @staticmethod
+    def _gather_hidden_states(
+        hidden_states: torch.Tensor,
+        residual: torch.Tensor,
+        forward_batch: ForwardBatch,
+        layernorm: torch.nn.Module,
+        context: CommunicateContext,
+    ):
+        if context.local_attn_dp_size != 1:
+            if context.attn_tp_rank == 0:
+                hidden_states += residual
+            hidden_states, local_hidden_states = (
+                forward_batch.gathered_buffer,
+                hidden_states,
+            )
+            dp_gather_partial(hidden_states, local_hidden_states, forward_batch)
+            dp_scatter(residual, hidden_states, forward_batch)
+            if hidden_states.shape[0] != 0:
+                hidden_states = layernorm(hidden_states)
+        else:
+            hidden_states = tensor_model_parallel_all_reduce(hidden_states)
+            hidden_states, residual = layernorm(hidden_states, residual)
+        return hidden_states, residual
+
+    @staticmethod
+    def _scatter_hidden_states_and_residual(
+        hidden_states: torch.Tensor,
+        residual: torch.Tensor,
+        forward_batch: ForwardBatch,
+        layernorm: torch.nn.Module,
+        context: CommunicateContext,
+        *,
+        residual_input_mode,
+    ):
+        tensor_list = list(hidden_states.tensor_split(context.attn_tp_size))
+        hidden_states = tensor_list[context.attn_tp_rank]
+        attn_tp_reduce_scatter(hidden_states, tensor_list)
+        if residual_input_mode == ScatterMode.TP_ATTN_FULL:
+            residual = residual.tensor_split(context.attn_tp_size)[context.attn_tp_rank]
+        if hidden_states.shape[0] != 0:
+            hidden_states, residual = layernorm(hidden_states, residual)
+        return hidden_states, residual

-    def _inner():
-        nonlocal hidden_states, residual

+class CommunicateSummableTensorPairFn:
+    """It is allowed to make (hidden_states, residual) := (hidden_states + residual, None) if needed."""
+
+    @classmethod
+    def execute(
+        cls,
+        hidden_states_input_mode,
+        residual_input_mode,
+        output_mode,
+        context,
+        **kwargs,
+    ):
+        return cls.get_fn(
+            hidden_states_input_mode=hidden_states_input_mode,
+            residual_input_mode=residual_input_mode,
+            output_mode=output_mode,
+            context=context,
+        )(context=context, **kwargs)
+
+    @staticmethod
+    def get_fn(
+        hidden_states_input_mode: ScatterMode,
+        residual_input_mode: ScatterMode,
+        output_mode: ScatterMode,
+        context: CommunicateContext,
+    ):
         if context.is_same_group_size(
             hidden_states_input_mode, output_mode
         ) and context.is_same_group_size(residual_input_mode, output_mode):
-            return
+            return CommunicateSummableTensorPairFn._trivial

         if (
             (hidden_states_input_mode == ScatterMode.FULL)
             and (residual_input_mode == ScatterMode.TP_ATTN_FULL)
             and (output_mode == ScatterMode.TP_ATTN_FULL)
         ):
-
-            # important: forward batch.gathered_buffer is used both after scatter and after gather.
-            # be careful about this!
-            hidden_states, global_hidden_states = (
-                forward_batch.gathered_buffer[: forward_batch.input_ids.shape[0]],
-                hidden_states,
-            )
-            dp_scatter(hidden_states, global_hidden_states, forward_batch)
-            return hidden_states, residual
+            return CommunicateSummableTensorPairFn._scatter_hidden_states

         if (
             (hidden_states_input_mode == ScatterMode.SCATTERED)
             and (residual_input_mode == ScatterMode.SCATTERED)
             and (output_mode == ScatterMode.TP_ATTN_FULL)
         ):
-
-
-
-
-
-            )
-
-
-                local_hidden_states,
-            )
-            return hidden_states, residual
+            return CommunicateSummableTensorPairFn._gather
+
+        if (
+            (hidden_states_input_mode == ScatterMode.TP_ATTN_FULL)
+            and (residual_input_mode == ScatterMode.TP_ATTN_FULL)
+            and (output_mode == ScatterMode.SCATTERED)
+        ):
+            return CommunicateSummableTensorPairFn._scatter

         raise NotImplementedError(
             f"{hidden_states_input_mode=} {residual_input_mode=} {output_mode=}"
         )

-
-
-
-
+    @staticmethod
+    def _trivial(
+        hidden_states: torch.Tensor,
+        residual: torch.Tensor,
+        forward_batch: ForwardBatch,
+        context: CommunicateContext,
+    ):
+        return hidden_states, residual
+
+    @staticmethod
+    def _scatter_hidden_states(
+        hidden_states: torch.Tensor,
+        residual: torch.Tensor,
+        forward_batch: ForwardBatch,
+        context: CommunicateContext,
+    ):
+        # TODO(ch-wan): use reduce-scatter in MLP to avoid this scatter
+        # important: forward batch.gathered_buffer is used both after scatter and after gather.
+        # be careful about this!
+        hidden_states, global_hidden_states = (
+            forward_batch.gathered_buffer[: forward_batch.input_ids.shape[0]],
+            hidden_states,
+        )
+        dp_scatter(hidden_states, global_hidden_states, forward_batch)
+        return hidden_states, residual
+
+    @staticmethod
+    def _gather(
+        hidden_states: torch.Tensor,
+        residual: torch.Tensor,
+        forward_batch: ForwardBatch,
+        context: CommunicateContext,
+    ):
+        hidden_states += residual
+        residual = None
+        hidden_states, local_hidden_states = (
+            forward_batch.gathered_buffer[: forward_batch.input_ids.shape[0]],
+            hidden_states,
+        )
+        attn_tp_all_gather(
+            list(hidden_states.tensor_split(context.attn_tp_size)),
+            local_hidden_states,
+        )
+        return hidden_states, residual
+
+    @staticmethod
+    def _scatter(
+        hidden_states: torch.Tensor,
+        residual: torch.Tensor,
+        forward_batch: ForwardBatch,
+        context: CommunicateContext,
+    ):
+        assert residual is None, "not yet handled residual!=None"
+        tensor_list = list(hidden_states.tensor_split(context.attn_tp_size))
+        hidden_states = tensor_list[context.attn_tp_rank]
+        return hidden_states, residual
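The hunks above (from sglang/srt/layers/communicator.py, going by the file list) share one recurring refactor: per-forward helpers that re-derived a context and branched on scatter modes are replaced by `get_fn` methods that pick a static handler once, when the `LayerCommunicator` is constructed, after which each forward pass just calls the stored callable. A minimal, self-contained sketch of that pattern follows; the names (`Mode`, `SimpleFn`, `Layer`) are illustrative stand-ins, not the real sglang classes.

```python
from enum import Enum, auto


class Mode(Enum):
    SCATTERED = auto()
    TP_ATTN_FULL = auto()
    FULL = auto()


class SimpleFn:
    @staticmethod
    def get_fn(input_mode: Mode, output_mode: Mode):
        # Decide once which communication path this layer needs.
        if input_mode == output_mode:
            return SimpleFn._trivial
        if (input_mode, output_mode) == (Mode.SCATTERED, Mode.TP_ATTN_FULL):
            return SimpleFn._gather
        raise NotImplementedError(f"{input_mode=} {output_mode=}")

    @staticmethod
    def _trivial(x):
        # Already in the right layout; nothing to communicate.
        return x

    @staticmethod
    def _gather(x):
        # Stand-in for an all-gather across the attention-TP group.
        return x


class Layer:
    def __init__(self, input_mode: Mode, output_mode: Mode):
        # Mirrors LayerCommunicator.__init__ in the diff: the callable is chosen here, once.
        self._communicate = SimpleFn.get_fn(input_mode, output_mode)

    def forward(self, x):
        # Every forward pass just calls the precomputed function.
        return self._communicate(x)


if __name__ == "__main__":
    layer = Layer(Mode.SCATTERED, Mode.TP_ATTN_FULL)
    print(layer.forward([1, 2, 3]))
```

The payoff, as in the diff, is that mode dispatch and the NotImplementedError checks run once at layer construction instead of on every forward pass.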
sglang/srt/layers/dp_attention.py
@@ -98,11 +98,12 @@ def initialize_dp_attention(
         ],
         local_rank,
         torch.distributed.get_backend(tp_group.device_group),
-        SYNC_TOKEN_IDS_ACROSS_TP,
-        False,
-        False,
-        False,
-        False,
+        use_pynccl=SYNC_TOKEN_IDS_ACROSS_TP,
+        use_pymscclpp=False,
+        use_custom_allreduce=False,
+        use_hpu_communicator=False,
+        use_xpu_communicator=False,
+        use_npu_communicator=False,
         group_name="attention_tp",
     )
