sglang 0.5.3rc2__py3-none-any.whl → 0.5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_one_batch.py +47 -28
- sglang/bench_one_batch_server.py +41 -25
- sglang/bench_serving.py +330 -156
- sglang/check_env.py +1 -1
- sglang/compile_deep_gemm.py +6 -2
- sglang/global_config.py +1 -25
- sglang/lang/api.py +6 -0
- sglang/lang/interpreter.py +1 -0
- sglang/lang/ir.py +13 -0
- sglang/launch_server.py +8 -15
- sglang/profiler.py +18 -1
- sglang/srt/_custom_ops.py +1 -1
- sglang/srt/batch_invariant_ops/batch_invariant_ops.py +4 -6
- sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
- sglang/srt/compilation/backend.py +437 -0
- sglang/srt/compilation/compilation_config.py +20 -0
- sglang/srt/compilation/compilation_counter.py +47 -0
- sglang/srt/compilation/compile.py +210 -0
- sglang/srt/compilation/compiler_interface.py +503 -0
- sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
- sglang/srt/compilation/fix_functionalization.py +134 -0
- sglang/srt/compilation/fx_utils.py +83 -0
- sglang/srt/compilation/inductor_pass.py +140 -0
- sglang/srt/compilation/pass_manager.py +66 -0
- sglang/srt/compilation/piecewise_context_manager.py +40 -0
- sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
- sglang/srt/configs/__init__.py +4 -0
- sglang/srt/configs/deepseek_ocr.py +262 -0
- sglang/srt/configs/deepseekvl2.py +194 -96
- sglang/srt/configs/dots_vlm.py +2 -7
- sglang/srt/configs/falcon_h1.py +13 -64
- sglang/srt/configs/load_config.py +25 -2
- sglang/srt/configs/mamba_utils.py +117 -0
- sglang/srt/configs/model_config.py +134 -23
- sglang/srt/configs/modelopt_config.py +30 -0
- sglang/srt/configs/nemotron_h.py +286 -0
- sglang/srt/configs/olmo3.py +105 -0
- sglang/srt/configs/points_v15_chat.py +29 -0
- sglang/srt/configs/qwen3_next.py +11 -47
- sglang/srt/configs/qwen3_omni.py +613 -0
- sglang/srt/configs/qwen3_vl.py +0 -10
- sglang/srt/connector/remote_instance.py +1 -1
- sglang/srt/constrained/base_grammar_backend.py +5 -1
- sglang/srt/constrained/llguidance_backend.py +5 -0
- sglang/srt/constrained/outlines_backend.py +1 -1
- sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
- sglang/srt/constrained/utils.py +12 -0
- sglang/srt/constrained/xgrammar_backend.py +20 -11
- sglang/srt/disaggregation/ascend/transfer_engine.py +1 -1
- sglang/srt/disaggregation/base/conn.py +17 -4
- sglang/srt/disaggregation/common/conn.py +4 -2
- sglang/srt/disaggregation/decode.py +123 -31
- sglang/srt/disaggregation/decode_kvcache_offload_manager.py +1 -1
- sglang/srt/disaggregation/fake/conn.py +11 -3
- sglang/srt/disaggregation/mooncake/conn.py +157 -19
- sglang/srt/disaggregation/nixl/conn.py +69 -24
- sglang/srt/disaggregation/prefill.py +96 -270
- sglang/srt/distributed/device_communicators/all_reduce_utils.py +4 -4
- sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
- sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
- sglang/srt/distributed/device_communicators/pynccl.py +24 -12
- sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
- sglang/srt/distributed/device_communicators/symm_mem.py +1 -1
- sglang/srt/distributed/naive_distributed.py +5 -4
- sglang/srt/distributed/parallel_state.py +70 -19
- sglang/srt/elastic_ep/elastic_ep.py +74 -0
- sglang/srt/entrypoints/context.py +3 -2
- sglang/srt/entrypoints/engine.py +66 -66
- sglang/srt/entrypoints/grpc_server.py +431 -234
- sglang/srt/entrypoints/harmony_utils.py +2 -2
- sglang/srt/entrypoints/http_server.py +120 -8
- sglang/srt/entrypoints/http_server_engine.py +1 -7
- sglang/srt/entrypoints/openai/protocol.py +225 -37
- sglang/srt/entrypoints/openai/serving_base.py +49 -2
- sglang/srt/entrypoints/openai/serving_chat.py +29 -74
- sglang/srt/entrypoints/openai/serving_classify.py +204 -0
- sglang/srt/entrypoints/openai/serving_completions.py +15 -1
- sglang/srt/entrypoints/openai/serving_responses.py +5 -2
- sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
- sglang/srt/environ.py +42 -4
- sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
- sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
- sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
- sglang/srt/eplb/expert_distribution.py +3 -4
- sglang/srt/eplb/expert_location_dispatch.py +2 -2
- sglang/srt/eplb/expert_location_updater.py +2 -2
- sglang/srt/function_call/base_format_detector.py +17 -18
- sglang/srt/function_call/function_call_parser.py +18 -14
- sglang/srt/function_call/glm4_moe_detector.py +1 -5
- sglang/srt/function_call/gpt_oss_detector.py +1 -1
- sglang/srt/function_call/json_array_parser.py +0 -2
- sglang/srt/function_call/utils.py +2 -2
- sglang/srt/grpc/compile_proto.py +3 -3
- sglang/srt/{entrypoints → grpc}/grpc_request_manager.py +112 -52
- sglang/srt/grpc/health_servicer.py +189 -0
- sglang/srt/grpc/scheduler_launcher.py +181 -0
- sglang/srt/grpc/sglang_scheduler_pb2.py +78 -70
- sglang/srt/grpc/sglang_scheduler_pb2.pyi +66 -10
- sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +89 -1
- sglang/srt/layers/activation.py +4 -1
- sglang/srt/layers/attention/aiter_backend.py +3 -3
- sglang/srt/layers/attention/ascend_backend.py +17 -1
- sglang/srt/layers/attention/attention_registry.py +43 -23
- sglang/srt/layers/attention/base_attn_backend.py +20 -1
- sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
- sglang/srt/layers/attention/fla/chunk.py +0 -1
- sglang/srt/layers/attention/fla/chunk_o.py +1 -1
- sglang/srt/layers/attention/fla/index.py +0 -2
- sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
- sglang/srt/layers/attention/fla/utils.py +0 -3
- sglang/srt/layers/attention/fla/wy_fast.py +0 -2
- sglang/srt/layers/attention/flashattention_backend.py +12 -8
- sglang/srt/layers/attention/flashinfer_backend.py +248 -21
- sglang/srt/layers/attention/flashinfer_mla_backend.py +20 -18
- sglang/srt/layers/attention/flashmla_backend.py +2 -2
- sglang/srt/layers/attention/hybrid_attn_backend.py +1 -1
- sglang/srt/layers/attention/hybrid_linear_attn_backend.py +165 -62
- sglang/srt/layers/attention/intel_amx_backend.py +1 -1
- sglang/srt/layers/attention/mamba/causal_conv1d.py +1 -1
- sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +9 -5
- sglang/srt/layers/attention/mamba/mamba.py +189 -241
- sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
- sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
- sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +0 -50
- sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +0 -60
- sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +0 -111
- sglang/srt/layers/attention/mamba/ops/ssd_combined.py +0 -1
- sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +0 -11
- sglang/srt/layers/attention/npu_ops/mla_preprocess.py +1 -1
- sglang/srt/layers/attention/nsa/nsa_indexer.py +40 -83
- sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
- sglang/srt/layers/attention/nsa/utils.py +0 -1
- sglang/srt/layers/attention/nsa_backend.py +404 -90
- sglang/srt/layers/attention/triton_backend.py +208 -34
- sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
- sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
- sglang/srt/layers/attention/trtllm_mha_backend.py +2 -2
- sglang/srt/layers/attention/trtllm_mla_backend.py +361 -30
- sglang/srt/layers/attention/utils.py +11 -7
- sglang/srt/layers/attention/vision.py +3 -3
- sglang/srt/layers/attention/xpu_backend.py +1028 -0
- sglang/srt/layers/communicator.py +11 -7
- sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +4 -8
- sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/configurer.py +4 -3
- sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
- sglang/srt/layers/dp_attention.py +17 -0
- sglang/srt/layers/layernorm.py +45 -15
- sglang/srt/layers/linear.py +9 -1
- sglang/srt/layers/logits_processor.py +147 -17
- sglang/srt/layers/modelopt_utils.py +11 -0
- sglang/srt/layers/moe/cutlass_moe.py +0 -2
- sglang/srt/layers/moe/cutlass_w4a8_moe.py +213 -21
- sglang/srt/layers/moe/ep_moe/kernels.py +35 -457
- sglang/srt/layers/moe/ep_moe/layer.py +119 -397
- sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +1 -1
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +11 -3
- sglang/srt/layers/moe/fused_moe_triton/layer.py +76 -70
- sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +18 -42
- sglang/srt/layers/moe/moe_runner/deep_gemm.py +304 -0
- sglang/srt/layers/moe/moe_runner/runner.py +3 -0
- sglang/srt/layers/moe/moe_runner/triton.py +3 -1
- sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
- sglang/srt/layers/moe/router.py +51 -15
- sglang/srt/layers/moe/token_dispatcher/__init__.py +10 -0
- sglang/srt/layers/moe/token_dispatcher/base.py +1 -1
- sglang/srt/layers/moe/token_dispatcher/deepep.py +110 -97
- sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
- sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
- sglang/srt/layers/moe/topk.py +3 -2
- sglang/srt/layers/moe/utils.py +17 -1
- sglang/srt/layers/quantization/__init__.py +2 -53
- sglang/srt/layers/quantization/awq.py +183 -6
- sglang/srt/layers/quantization/awq_triton.py +29 -0
- sglang/srt/layers/quantization/base_config.py +20 -1
- sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +20 -49
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
- sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +3 -0
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
- sglang/srt/layers/quantization/fp8.py +84 -18
- sglang/srt/layers/quantization/fp8_kernel.py +55 -10
- sglang/srt/layers/quantization/fp8_utils.py +42 -14
- sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
- sglang/srt/layers/quantization/gptq.py +0 -1
- sglang/srt/layers/quantization/int8_kernel.py +18 -2
- sglang/srt/layers/quantization/marlin_utils.py +12 -0
- sglang/srt/layers/quantization/modelopt_quant.py +125 -100
- sglang/srt/layers/quantization/mxfp4.py +5 -30
- sglang/srt/layers/quantization/petit.py +1 -1
- sglang/srt/layers/quantization/quark/quark.py +3 -1
- sglang/srt/layers/quantization/quark/quark_moe.py +3 -3
- sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
- sglang/srt/layers/quantization/unquant.py +1 -4
- sglang/srt/layers/quantization/utils.py +0 -1
- sglang/srt/layers/quantization/w4afp8.py +51 -20
- sglang/srt/layers/quantization/w8a8_int8.py +30 -24
- sglang/srt/layers/radix_attention.py +59 -9
- sglang/srt/layers/rotary_embedding.py +673 -16
- sglang/srt/layers/sampler.py +36 -16
- sglang/srt/layers/sparse_pooler.py +98 -0
- sglang/srt/layers/utils.py +0 -1
- sglang/srt/layers/vocab_parallel_embedding.py +4 -1
- sglang/srt/lora/backend/triton_backend.py +0 -1
- sglang/srt/lora/eviction_policy.py +139 -0
- sglang/srt/lora/lora_manager.py +24 -9
- sglang/srt/lora/lora_registry.py +1 -1
- sglang/srt/lora/mem_pool.py +40 -16
- sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +1 -1
- sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +4 -2
- sglang/srt/managers/cache_controller.py +48 -17
- sglang/srt/managers/data_parallel_controller.py +146 -42
- sglang/srt/managers/detokenizer_manager.py +40 -13
- sglang/srt/managers/io_struct.py +66 -16
- sglang/srt/managers/mm_utils.py +20 -18
- sglang/srt/managers/multi_tokenizer_mixin.py +66 -81
- sglang/srt/managers/overlap_utils.py +96 -19
- sglang/srt/managers/schedule_batch.py +241 -511
- sglang/srt/managers/schedule_policy.py +15 -2
- sglang/srt/managers/scheduler.py +399 -499
- sglang/srt/managers/scheduler_metrics_mixin.py +55 -8
- sglang/srt/managers/scheduler_output_processor_mixin.py +317 -111
- sglang/srt/managers/scheduler_pp_mixin.py +341 -0
- sglang/srt/managers/scheduler_profiler_mixin.py +57 -10
- sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
- sglang/srt/managers/scheduler_update_weights_mixin.py +33 -14
- sglang/srt/managers/tokenizer_communicator_mixin.py +71 -55
- sglang/srt/managers/tokenizer_manager.py +378 -90
- sglang/srt/managers/tp_worker.py +212 -161
- sglang/srt/managers/utils.py +78 -2
- sglang/srt/mem_cache/allocator.py +7 -2
- sglang/srt/mem_cache/allocator_ascend.py +2 -2
- sglang/srt/mem_cache/base_prefix_cache.py +2 -2
- sglang/srt/mem_cache/chunk_cache.py +13 -2
- sglang/srt/mem_cache/common.py +480 -0
- sglang/srt/mem_cache/evict_policy.py +16 -1
- sglang/srt/mem_cache/hicache_storage.py +4 -1
- sglang/srt/mem_cache/hiradix_cache.py +16 -3
- sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
- sglang/srt/mem_cache/memory_pool.py +435 -219
- sglang/srt/mem_cache/memory_pool_host.py +0 -1
- sglang/srt/mem_cache/multimodal_cache.py +0 -1
- sglang/srt/mem_cache/radix_cache.py +53 -19
- sglang/srt/mem_cache/radix_cache_cpp.py +19 -14
- sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +8 -2
- sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +1 -13
- sglang/srt/mem_cache/storage/backend_factory.py +2 -2
- sglang/srt/mem_cache/storage/eic/eic_storage.py +5 -6
- sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
- sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +9 -3
- sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +5 -3
- sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +101 -17
- sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
- sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
- sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
- sglang/srt/mem_cache/swa_radix_cache.py +92 -26
- sglang/srt/metrics/collector.py +31 -0
- sglang/srt/metrics/func_timer.py +1 -1
- sglang/srt/model_executor/cuda_graph_runner.py +43 -5
- sglang/srt/model_executor/forward_batch_info.py +28 -23
- sglang/srt/model_executor/model_runner.py +379 -139
- sglang/srt/model_executor/npu_graph_runner.py +2 -3
- sglang/srt/model_executor/piecewise_cuda_graph_runner.py +539 -0
- sglang/srt/model_loader/__init__.py +1 -1
- sglang/srt/model_loader/loader.py +424 -27
- sglang/srt/model_loader/utils.py +0 -1
- sglang/srt/model_loader/weight_utils.py +47 -28
- sglang/srt/models/apertus.py +2 -3
- sglang/srt/models/arcee.py +2 -2
- sglang/srt/models/bailing_moe.py +13 -52
- sglang/srt/models/bailing_moe_nextn.py +3 -4
- sglang/srt/models/bert.py +1 -1
- sglang/srt/models/deepseek_nextn.py +19 -3
- sglang/srt/models/deepseek_ocr.py +1516 -0
- sglang/srt/models/deepseek_v2.py +273 -98
- sglang/srt/models/dots_ocr.py +0 -2
- sglang/srt/models/dots_vlm.py +0 -1
- sglang/srt/models/dots_vlm_vit.py +1 -1
- sglang/srt/models/falcon_h1.py +13 -19
- sglang/srt/models/gemma3_mm.py +16 -0
- sglang/srt/models/gemma3n_mm.py +1 -2
- sglang/srt/models/glm4_moe.py +14 -37
- sglang/srt/models/glm4_moe_nextn.py +2 -2
- sglang/srt/models/glm4v.py +2 -1
- sglang/srt/models/glm4v_moe.py +5 -5
- sglang/srt/models/gpt_oss.py +5 -5
- sglang/srt/models/grok.py +10 -23
- sglang/srt/models/hunyuan.py +2 -7
- sglang/srt/models/interns1.py +0 -1
- sglang/srt/models/kimi_vl.py +1 -7
- sglang/srt/models/kimi_vl_moonvit.py +3 -1
- sglang/srt/models/llama.py +2 -2
- sglang/srt/models/llama_eagle3.py +1 -1
- sglang/srt/models/longcat_flash.py +5 -22
- sglang/srt/models/longcat_flash_nextn.py +3 -14
- sglang/srt/models/mimo.py +2 -13
- sglang/srt/models/mimo_mtp.py +1 -2
- sglang/srt/models/minicpmo.py +7 -5
- sglang/srt/models/mixtral.py +1 -4
- sglang/srt/models/mllama.py +1 -1
- sglang/srt/models/mllama4.py +13 -3
- sglang/srt/models/nemotron_h.py +511 -0
- sglang/srt/models/olmo2.py +31 -4
- sglang/srt/models/opt.py +5 -5
- sglang/srt/models/phi.py +1 -1
- sglang/srt/models/phi4mm.py +1 -1
- sglang/srt/models/phimoe.py +0 -1
- sglang/srt/models/pixtral.py +0 -3
- sglang/srt/models/points_v15_chat.py +186 -0
- sglang/srt/models/qwen.py +0 -1
- sglang/srt/models/qwen2_5_vl.py +3 -3
- sglang/srt/models/qwen2_audio.py +2 -15
- sglang/srt/models/qwen2_moe.py +15 -12
- sglang/srt/models/qwen2_vl.py +5 -2
- sglang/srt/models/qwen3_moe.py +19 -35
- sglang/srt/models/qwen3_next.py +7 -12
- sglang/srt/models/qwen3_next_mtp.py +3 -4
- sglang/srt/models/qwen3_omni_moe.py +661 -0
- sglang/srt/models/qwen3_vl.py +37 -33
- sglang/srt/models/qwen3_vl_moe.py +57 -185
- sglang/srt/models/roberta.py +55 -3
- sglang/srt/models/sarashina2_vision.py +0 -1
- sglang/srt/models/step3_vl.py +3 -5
- sglang/srt/models/utils.py +11 -1
- sglang/srt/multimodal/processors/base_processor.py +6 -2
- sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
- sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
- sglang/srt/multimodal/processors/dots_vlm.py +0 -1
- sglang/srt/multimodal/processors/glm4v.py +1 -5
- sglang/srt/multimodal/processors/internvl.py +0 -2
- sglang/srt/multimodal/processors/janus_pro.py +0 -1
- sglang/srt/multimodal/processors/mllama4.py +0 -8
- sglang/srt/multimodal/processors/phi4mm.py +0 -1
- sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
- sglang/srt/multimodal/processors/qwen_vl.py +75 -16
- sglang/srt/multimodal/processors/step3_vl.py +1 -1
- sglang/srt/parser/conversation.py +41 -0
- sglang/srt/parser/reasoning_parser.py +0 -1
- sglang/srt/sampling/custom_logit_processor.py +77 -2
- sglang/srt/sampling/sampling_batch_info.py +17 -22
- sglang/srt/sampling/sampling_params.py +70 -2
- sglang/srt/server_args.py +577 -73
- sglang/srt/server_args_config_parser.py +1 -1
- sglang/srt/single_batch_overlap.py +38 -28
- sglang/srt/speculative/base_spec_worker.py +34 -0
- sglang/srt/speculative/draft_utils.py +226 -0
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +24 -7
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +23 -2
- sglang/srt/speculative/eagle_info.py +57 -18
- sglang/srt/speculative/eagle_info_v2.py +458 -0
- sglang/srt/speculative/eagle_utils.py +138 -0
- sglang/srt/speculative/eagle_worker.py +83 -280
- sglang/srt/speculative/eagle_worker_v2.py +702 -0
- sglang/srt/speculative/{ngram_utils.py → ngram_info.py} +14 -9
- sglang/srt/speculative/ngram_worker.py +12 -11
- sglang/srt/speculative/spec_info.py +2 -0
- sglang/srt/speculative/spec_utils.py +38 -3
- sglang/srt/speculative/standalone_worker.py +4 -14
- sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
- sglang/srt/two_batch_overlap.py +28 -14
- sglang/srt/utils/__init__.py +1 -1
- sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
- sglang/srt/utils/common.py +192 -47
- sglang/srt/utils/hf_transformers_utils.py +40 -17
- sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
- sglang/srt/{offloader.py → utils/offloader.py} +4 -4
- sglang/srt/utils/profile_merger.py +199 -0
- sglang/test/attention/test_flashattn_backend.py +1 -1
- sglang/test/attention/test_flashattn_mla_backend.py +0 -1
- sglang/test/attention/test_prefix_chunk_info.py +0 -2
- sglang/test/attention/test_trtllm_mla_backend.py +221 -53
- sglang/test/few_shot_gsm8k_engine.py +2 -4
- sglang/test/kit_matched_stop.py +157 -0
- sglang/test/longbench_v2/__init__.py +1 -0
- sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
- sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
- sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
- sglang/test/run_eval.py +41 -0
- sglang/test/runners.py +2 -0
- sglang/test/send_one.py +42 -7
- sglang/test/simple_eval_common.py +3 -0
- sglang/test/simple_eval_gpqa.py +0 -1
- sglang/test/simple_eval_humaneval.py +0 -3
- sglang/test/simple_eval_longbench_v2.py +344 -0
- sglang/test/test_block_fp8.py +1 -2
- sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
- sglang/test/test_cutlass_moe.py +1 -2
- sglang/test/test_cutlass_w4a8_moe.py +10 -20
- sglang/test/test_deterministic.py +232 -99
- sglang/test/test_deterministic_utils.py +73 -0
- sglang/test/test_disaggregation_utils.py +81 -0
- sglang/test/test_marlin_moe.py +0 -1
- sglang/test/test_utils.py +85 -20
- sglang/version.py +1 -1
- {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/METADATA +45 -33
- {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/RECORD +404 -345
- sglang/srt/layers/attention/mamba/mamba_utils.py +0 -81
- sglang/srt/managers/tp_worker_overlap_thread.py +0 -311
- sglang/srt/speculative/build_eagle_tree.py +0 -427
- sglang/test/test_block_fp8_ep.py +0 -358
- /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
- /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
- /sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +0 -0
- {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/WHEEL +0 -0
- {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/top_level.txt +0 -0
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json
ADDED

@@ -0,0 +1,146 @@
+{
+    "1": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 4
+    },
+    "2": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 4
+    },
+    "4": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 4
+    },
+    "8": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 4
+    },
+    "16": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "24": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "32": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "48": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "64": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "96": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "128": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "256": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "512": {
+        "BLOCK_SIZE_M": 64,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "1024": {
+        "BLOCK_SIZE_M": 64,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 16,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "1536": {
+        "BLOCK_SIZE_M": 128,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "2048": {
+        "BLOCK_SIZE_M": 64,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 16,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "3072": {
+        "BLOCK_SIZE_M": 128,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 16,
+        "num_warps": 8,
+        "num_stages": 4
+    },
+    "4096": {
+        "BLOCK_SIZE_M": 128,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 16,
+        "num_warps": 8,
+        "num_stages": 3
+    }
+}
sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json
ADDED

@@ -0,0 +1,146 @@
+{
+    "1": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 64,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 5
+    },
+    "2": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 64,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 5
+    },
+    "4": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 64,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 5
+    },
+    "8": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "16": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 64,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "24": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 256,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 2
+    },
+    "32": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "48": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "64": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "96": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 64,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 2
+    },
+    "128": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 64,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 2
+    },
+    "256": {
+        "BLOCK_SIZE_M": 16,
+        "BLOCK_SIZE_N": 64,
+        "BLOCK_SIZE_K": 128,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 2
+    },
+    "512": {
+        "BLOCK_SIZE_M": 32,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 3
+    },
+    "1024": {
+        "BLOCK_SIZE_M": 64,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "1536": {
+        "BLOCK_SIZE_M": 64,
+        "BLOCK_SIZE_N": 128,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 8,
+        "num_stages": 3
+    },
+    "2048": {
+        "BLOCK_SIZE_M": 128,
+        "BLOCK_SIZE_N": 256,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 2
+    },
+    "3072": {
+        "BLOCK_SIZE_M": 128,
+        "BLOCK_SIZE_N": 256,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 2
+    },
+    "4096": {
+        "BLOCK_SIZE_M": 128,
+        "BLOCK_SIZE_N": 256,
+        "BLOCK_SIZE_K": 64,
+        "GROUP_SIZE_M": 1,
+        "num_warps": 4,
+        "num_stages": 2
+    }
+}
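Both of the tables above map a token-count bucket M (the JSON keys) to a Triton launch configuration tuned for that batch size on the named GPU. As a minimal sketch of how such a table can be consumed — pick_config below is a hypothetical helper, not the sglang API, and the nearest-bucket fallback is an assumption about how the fused MoE runner selects a config when M falls between tuned sizes:

import json

def pick_config(path: str, num_tokens: int) -> dict:
    # Keys are token-count buckets: "1", "2", ..., "4096".
    with open(path) as f:
        table = {int(m): cfg for m, cfg in json.load(f).items()}
    if num_tokens in table:
        return table[num_tokens]
    # Otherwise fall back to the bucket closest to the actual batch size.
    return table[min(table, key=lambda m: abs(m - num_tokens))]

cfg = pick_config("E=256,N=256,device_name=NVIDIA_B200.json", num_tokens=1000)
# -> the "1024" bucket: BLOCK_SIZE_M=64, BLOCK_SIZE_N=128, BLOCK_SIZE_K=64,
#    GROUP_SIZE_M=1, num_warps=8, num_stages=3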
sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py

@@ -16,14 +16,19 @@ _is_hip = is_hip()
 def get_config_file_name(
-    E: int,
+    E: int,
+    N: int,
+    dtype: Optional[str],
+    block_shape: Optional[int] = None,
+    per_channel_quant: bool = False,
 ) -> str:
     device_name = get_device_name().replace(" ", "_")
     dtype_selector = "" if not dtype else f",dtype={dtype}"
     block_shape_selector = (
         "" if not block_shape or not all(block_shape) else f",block_shape={block_shape}"
     )
-
+    per_channel_quant_selector = ",per_channel_quant=True" if per_channel_quant else ""
+    return f"E={E},N={N},device_name={device_name}{dtype_selector}{block_shape_selector}{per_channel_quant_selector}.json"
 
 
 @functools.lru_cache

@@ -33,6 +38,7 @@ def get_moe_configs(
     dtype: Optional[str],
     block_n: Optional[int] = 0,
     block_k: Optional[int] = 0,
+    per_channel_quant: bool = False,
 ) -> Optional[Dict[int, Any]]:
     """
     Return optimized configurations for the fused MoE kernel.

@@ -47,7 +53,9 @@ def get_moe_configs(
 
     # First look up if an optimized configuration is available in the configs
     # directory
-    json_file_name = get_config_file_name(
+    json_file_name = get_config_file_name(
+        E, N, dtype, [block_n, block_k], per_channel_quant
+    )
 
     # We found that using the fused_moe_kernel config from Triton 3.1.0 with Triton 3.2.0 results in negative performance gains,
     # so we also include the Triton version as a key for finding the fused_moe_kernel config to achieve the best performance.
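With the extended signature above, the two new config files resolve to exactly the names this function produces. A hedged REPL sketch (device_name comes from the local GPU, so the H200/B200 outputs are illustrative):

get_config_file_name(128, 192, "fp8_w8a8", [0, 0])
# -> 'E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json'  (on an H200)
get_config_file_name(256, 256, None, None)
# -> 'E=256,N=256,device_name=NVIDIA_B200.json'  (on a B200)
get_config_file_name(256, 256, None, None, per_channel_quant=True)
# -> 'E=256,N=256,device_name=NVIDIA_B200,per_channel_quant=True.json'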
sglang/srt/layers/moe/fused_moe_triton/layer.py

@@ -11,14 +11,19 @@ from sglang.srt.distributed import (
     get_moe_expert_parallel_world_size,
     get_moe_tensor_parallel_rank,
     get_moe_tensor_parallel_world_size,
+    get_tp_group,
     tensor_model_parallel_all_reduce,
 )
 from sglang.srt.eplb.expert_location import get_global_expert_location_metadata
 from sglang.srt.layers.moe import (
     MoeRunnerConfig,
+    get_deepep_mode,
+    get_moe_a2a_backend,
     get_moe_runner_backend,
     should_use_flashinfer_trtllm_moe,
 )
+from sglang.srt.layers.moe.token_dispatcher import CombineInput, DispatchOutput
+from sglang.srt.layers.moe.token_dispatcher.base import BaseDispatcher
 from sglang.srt.layers.moe.token_dispatcher.standard import (
     StandardDispatcher,
     StandardDispatchOutput,

@@ -27,31 +32,28 @@ from sglang.srt.layers.moe.topk import TopKOutput, TopKOutputChecker
 from sglang.srt.layers.quantization.base_config import (
     FusedMoEMethodBase,
     QuantizationConfig,
-
+)
+from sglang.srt.layers.quantization.compressed_tensors.compressed_tensors_moe import (
+    CompressedTensorsWNA16AMXEPMoEMethod,
+    CompressedTensorsWNA16AMXMoEMethod,
+    CompressedTensorsWNA16MoEMethod,
 )
 from sglang.srt.layers.quantization.fp8 import Fp8MoEMethod
 from sglang.srt.layers.quantization.modelopt_quant import ModelOptNvFp4FusedMoEMethod
 from sglang.srt.layers.quantization.unquant import UnquantizedFusedMoEMethod
-from sglang.srt.managers.schedule_batch import global_server_args_dict
 from sglang.srt.model_loader.weight_utils import narrow_padded_param_and_loaded_weight
+from sglang.srt.two_batch_overlap import MaybeTboDeepEPDispatcher
 from sglang.srt.utils import (
     cpu_has_amx_support,
     get_bool_env_var,
     is_cpu,
     is_flashinfer_available,
     is_hip,
-    next_power_of_2,
     round_up,
 )
 
 if is_flashinfer_available():
-    from flashinfer import (
-        RoutingMethodType,
-        fp4_quantize,
-        reorder_rows_for_gated_act_gemm,
-        shuffle_matrix_a,
-        shuffle_matrix_sf_a,
-    )
+    from flashinfer import RoutingMethodType, fp4_quantize
 
 _is_hip = is_hip()
 _is_cpu_amx_available = cpu_has_amx_support()
@@ -69,14 +71,25 @@ if should_use_flashinfer_trtllm_moe():
 logger = logging.getLogger(__name__)
 
 
-def
-
-
-
-
-
-
+def create_moe_dispatcher(moe_runner_config: MoeRunnerConfig) -> BaseDispatcher:
+    a2a_backend = get_moe_a2a_backend()
+    if a2a_backend.is_none():
+        return StandardDispatcher(moe_runner_config)
+    elif a2a_backend.is_deepep() or a2a_backend.is_mooncake():
+        return MaybeTboDeepEPDispatcher(
+            group=get_tp_group().device_group,
+            router_topk=moe_runner_config.top_k,
+            permute_fusion=True,
+            num_experts=moe_runner_config.num_experts,
+            num_local_experts=moe_runner_config.num_local_experts,
+            hidden_size=moe_runner_config.hidden_size,
+            params_dtype=moe_runner_config.params_dtype,
+            deepep_mode=get_deepep_mode(),
+            async_finish=True,
+            return_recv_hook=True,
+        )
+    else:
+        raise NotImplementedError(f"Unsupported a2a backend: {a2a_backend}")
 
 
 class FusedMoeWeightScaleSupported(Enum):
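A hedged usage sketch of the new factory; the note about launch configuration is an assumption about server setup rather than something this diff shows, and the layer itself calls the factory from __init__, as a later hunk shows:

# With no MoE all-to-all backend configured, the factory degrades to the
# single-node path:
dispatcher = create_moe_dispatcher(moe_runner_config)  # -> StandardDispatcher
# When the a2a backend is DeepEP or Mooncake, the same call instead returns a
# MaybeTboDeepEPDispatcher bound to the TP process group; any other backend
# raises NotImplementedError.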
@@ -131,7 +144,6 @@ class FusedMoE(torch.nn.Module):
         with_bias=False,
     ):
         super().__init__()
-
         if params_dtype is None:
             params_dtype = torch.get_default_dtype()
 

@@ -140,8 +152,6 @@ class FusedMoE(torch.nn.Module):
         self.hidden_size = hidden_size
         self.num_experts = num_experts
         self.num_fused_shared_experts = num_fused_shared_experts
-        self.expert_map_cpu = None
-        self.expert_map_gpu = None
 
         enable_flashinfer_cutlass_moe = get_moe_runner_backend().is_flashinfer_cutlass()
 

@@ -156,20 +166,6 @@ class FusedMoE(torch.nn.Module):
         self.moe_tp_rank = get_moe_tensor_parallel_rank()
         assert num_experts % self.moe_ep_size == 0
         self.num_local_experts = num_experts // self.moe_ep_size
-        self.start_expert_id = self.moe_ep_rank * self.num_local_experts
-        self.end_expert_id = self.start_expert_id + self.num_local_experts - 1
-        if self.moe_ep_size > 1:
-            # TODO(ch-wan): support shared experts fusion
-            # Create a tensor of size num_experts filled with -1
-            self.expert_map_cpu = torch.full(
-                (self.num_experts,), -1, dtype=torch.int32, device="cpu"
-            )
-            # Create a expert map for the local experts
-            self.expert_map_cpu[
-                self.moe_ep_rank
-                * self.num_local_experts : (self.moe_ep_rank + 1)
-                * self.num_local_experts
-            ] = torch.arange(0, self.num_local_experts, dtype=torch.int32, device="cpu")
 
         assert intermediate_size % self.moe_tp_size == 0
         self.intermediate_size_per_partition = intermediate_size // self.moe_tp_size
@@ -207,15 +203,11 @@ class FusedMoE(torch.nn.Module):
             gemm1_clamp_limit=gemm1_clamp_limit,
         )
 
-
-
-
-
-
-        self.quant_method: FusedMoEMethodBase = quant_config.get_quant_method(
-            self, prefix
-        )
-        assert self.quant_method is not None
+        self.quant_method: Optional[FusedMoEMethodBase] = None
+        if quant_config is not None:
+            self.quant_method = quant_config.get_quant_method(self, prefix)
+        if self.quant_method is None:
+            self.quant_method = UnquantizedFusedMoEMethod(self.use_triton_kernels)
 
         self.quant_method.create_weights(
             layer=self,

@@ -228,11 +220,13 @@ class FusedMoE(torch.nn.Module):
                 if not use_weight_loader_fused
                 else self.weight_loader_fused
             ),
+            intermediate_size_full=intermediate_size,
+            top_k=top_k,
             with_bias=with_bias,
         )
 
         self.quant_method.create_moe_runner(self, self.moe_runner_config)
-        self.dispatcher =
+        self.dispatcher = create_moe_dispatcher(self.moe_runner_config)
 
         self.should_fuse_routed_scaling_factor_in_topk = isinstance(
             self.quant_method, ModelOptNvFp4FusedMoEMethod
@@ -466,9 +460,12 @@ class FusedMoE(torch.nn.Module):
             expert_data.copy_(loaded_weight)
 
     def _map_global_expert_id_to_local_expert_id(self, expert_id: int) -> int:
-
-
-
+        start_idx = self.moe_ep_rank * self.num_local_experts
+        end_idx = (self.moe_ep_rank + 1) * self.num_local_experts
+        if start_idx <= expert_id < end_idx:
+            return expert_id - start_idx
+        else:
+            return -1
 
     def weight_loader(
         self,
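The rewritten mapping above is pure index arithmetic over the contiguous expert range owned by each EP rank, replacing the expert_map_cpu/expert_map_gpu lookup tensors deleted in the earlier hunks. A worked example, assuming num_local_experts = 64:

# moe_ep_rank = 2 owns global experts [128, 192):
#   _map_global_expert_id_to_local_expert_id(130) -> 130 - 128 = 2
#   _map_global_expert_id_to_local_expert_id(64)  -> -1  (owned by ep_rank 1)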
@@ -540,6 +537,18 @@ class FusedMoE(torch.nn.Module):
         if expert_id == -1:
             return
 
+        if isinstance(
+            self.quant_method,
+            (
+                CompressedTensorsWNA16MoEMethod,
+                CompressedTensorsWNA16AMXMoEMethod,
+                CompressedTensorsWNA16AMXEPMoEMethod,
+            ),
+        ):
+            if self.quant_method.num_gpu_experts != -1:
+                if expert_id >= self.quant_method.num_gpu_experts:
+                    return
+
         self._weight_loader_impl(
             param=param,
             loaded_weight=loaded_weight,

@@ -566,7 +575,12 @@ class FusedMoE(torch.nn.Module):
             loaded_weight.t().contiguous()
             if (
                 self.quant_method.__class__.__name__
-
+                in [
+                    "CompressedTensorsWNA16MarlinMoEMethod",
+                    "CompressedTensorsWNA16MoEMethod",
+                    "CompressedTensorsWNA16AMXMoEMethod",
+                    "CompressedTensorsWNA16AMXEPMoEMethod",
+                ]
             )
             else loaded_weight
         )
@@ -813,35 +827,21 @@ class FusedMoE(torch.nn.Module):
             f"Unsupported weight_name {weight_name} for FusedMoE weight_loader_fused. Nothing is loaded."
         )
 
-    def forward(self, hidden_states: torch.Tensor, topk_output: TopKOutput):
+    def forward(self, hidden_states: torch.Tensor, topk_output: TopKOutput, **kwargs):
         origin_hidden_states_dim = hidden_states.shape[-1]
         assert self.quant_method is not None
 
-        if self.moe_ep_size > 1 and not self.enable_flashinfer_cutlass_moe:
-            if self.expert_map_cpu is not None and self.expert_map_gpu is None:
-                # If we are in EP mode, we need to move the expert map to GPU.
-                self.expert_map_gpu = self.expert_map_cpu.to(device="cuda")
-
-            if self.expert_map_gpu is not None:
-                if TopKOutputChecker.format_is_standard(topk_output):
-                    topk_output = topk_output._replace(
-                        topk_ids=self.expert_map_gpu[topk_output.topk_ids]
-                    )
-                elif TopKOutputChecker.format_is_triton_kernel(topk_output):
-                    raise NotImplementedError()
-
         dispatch_output = self.dispatcher.dispatch(
             hidden_states=hidden_states, topk_output=topk_output
         )
 
-
-        combine_input = self.quant_method.apply(
-            layer=self,
+        combine_input = self.run_moe_core(
             dispatch_output=dispatch_output,
+            **kwargs,
         )
-
         final_hidden_states = self.dispatcher.combine(combine_input)
 
+        # TODO: should we add some conditions here?
         final_hidden_states = final_hidden_states[
             ..., :origin_hidden_states_dim
         ].contiguous()
@@ -851,6 +851,14 @@ class FusedMoE(torch.nn.Module):
 
         return final_hidden_states
 
+    def run_moe_core(self, dispatch_output: DispatchOutput, **kwargs) -> CombineInput:
+        # TODO: consider using symmetric memory
+        return self.quant_method.apply(
+            layer=self,
+            dispatch_output=dispatch_output,
+            **kwargs,
+        )
+
     @classmethod
     def make_expert_params_mapping(
         cls,
@@ -1061,9 +1069,7 @@ class FlashInferFP4MoE(FusedMoE):
             local_expert_offset=self.moe_ep_rank * self.num_local_experts,
             local_num_experts=self.num_local_experts,
             routed_scaling_factor=self.moe_runner_config.routed_scaling_factor,
-            tile_tokens_dim=
-                hidden_states.shape[0], topk_config.top_k, self.num_local_experts
-            ),
+            tile_tokens_dim=None,
             routing_method_type=RoutingMethodType.DeepSeekV3,
             do_finalize=True,
         )[0]
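Net effect on the hot path: forward() is now a thin dispatch → compute → combine pipeline, with the quant-method call factored into run_moe_core(). A schematic of the control flow reconstructed from the hunks above (a sketch, not the literal sglang API):

import torch

def moe_forward_schematic(layer, hidden_states: torch.Tensor, topk_output, **kwargs):
    # 1. Dispatch: route tokens (possibly across EP ranks) to their experts.
    dispatch_output = layer.dispatcher.dispatch(
        hidden_states=hidden_states, topk_output=topk_output
    )
    # 2. Compute: apply the quant method's fused expert kernels.
    combine_input = layer.run_moe_core(dispatch_output=dispatch_output, **kwargs)
    # 3. Combine: gather expert outputs back into original token order.
    return layer.dispatcher.combine(combine_input)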