sglang 0.5.3rc2__py3-none-any.whl → 0.5.4.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
 - sglang/bench_one_batch.py +47 -28
 - sglang/bench_one_batch_server.py +41 -25
 - sglang/bench_serving.py +378 -160
 - sglang/check_env.py +1 -1
 - sglang/compile_deep_gemm.py +6 -2
 - sglang/global_config.py +1 -25
 - sglang/lang/api.py +6 -0
 - sglang/lang/interpreter.py +1 -0
 - sglang/lang/ir.py +13 -0
 - sglang/launch_server.py +10 -15
 - sglang/profiler.py +18 -1
 - sglang/srt/_custom_ops.py +1 -1
 - sglang/srt/batch_invariant_ops/batch_invariant_ops.py +105 -10
 - sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
 - sglang/srt/compilation/backend.py +437 -0
 - sglang/srt/compilation/compilation_config.py +20 -0
 - sglang/srt/compilation/compilation_counter.py +47 -0
 - sglang/srt/compilation/compile.py +210 -0
 - sglang/srt/compilation/compiler_interface.py +503 -0
 - sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
 - sglang/srt/compilation/fix_functionalization.py +134 -0
 - sglang/srt/compilation/fx_utils.py +83 -0
 - sglang/srt/compilation/inductor_pass.py +140 -0
 - sglang/srt/compilation/pass_manager.py +66 -0
 - sglang/srt/compilation/piecewise_context_manager.py +40 -0
 - sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
 - sglang/srt/configs/__init__.py +4 -0
 - sglang/srt/configs/deepseek_ocr.py +262 -0
 - sglang/srt/configs/deepseekvl2.py +194 -96
 - sglang/srt/configs/dots_vlm.py +2 -7
 - sglang/srt/configs/falcon_h1.py +13 -64
 - sglang/srt/configs/load_config.py +25 -2
 - sglang/srt/configs/mamba_utils.py +117 -0
 - sglang/srt/configs/model_config.py +136 -25
 - sglang/srt/configs/modelopt_config.py +30 -0
 - sglang/srt/configs/nemotron_h.py +286 -0
 - sglang/srt/configs/olmo3.py +105 -0
 - sglang/srt/configs/points_v15_chat.py +29 -0
 - sglang/srt/configs/qwen3_next.py +11 -47
 - sglang/srt/configs/qwen3_omni.py +613 -0
 - sglang/srt/configs/qwen3_vl.py +0 -10
 - sglang/srt/connector/remote_instance.py +1 -1
 - sglang/srt/constrained/base_grammar_backend.py +5 -1
 - sglang/srt/constrained/llguidance_backend.py +5 -0
 - sglang/srt/constrained/outlines_backend.py +1 -1
 - sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
 - sglang/srt/constrained/utils.py +12 -0
 - sglang/srt/constrained/xgrammar_backend.py +20 -11
 - sglang/srt/disaggregation/ascend/transfer_engine.py +1 -1
 - sglang/srt/disaggregation/base/conn.py +17 -4
 - sglang/srt/disaggregation/common/conn.py +4 -2
 - sglang/srt/disaggregation/decode.py +123 -31
 - sglang/srt/disaggregation/decode_kvcache_offload_manager.py +1 -1
 - sglang/srt/disaggregation/fake/conn.py +11 -3
 - sglang/srt/disaggregation/mooncake/conn.py +157 -19
 - sglang/srt/disaggregation/nixl/conn.py +69 -24
 - sglang/srt/disaggregation/prefill.py +96 -270
 - sglang/srt/distributed/device_communicators/all_reduce_utils.py +4 -4
 - sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
 - sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
 - sglang/srt/distributed/device_communicators/pynccl.py +24 -12
 - sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
 - sglang/srt/distributed/device_communicators/symm_mem.py +1 -1
 - sglang/srt/distributed/naive_distributed.py +5 -4
 - sglang/srt/distributed/parallel_state.py +63 -19
 - sglang/srt/elastic_ep/elastic_ep.py +74 -0
 - sglang/srt/entrypoints/context.py +3 -2
 - sglang/srt/entrypoints/engine.py +83 -80
 - sglang/srt/entrypoints/grpc_server.py +430 -234
 - sglang/srt/entrypoints/harmony_utils.py +2 -2
 - sglang/srt/entrypoints/http_server.py +195 -102
 - sglang/srt/entrypoints/http_server_engine.py +1 -7
 - sglang/srt/entrypoints/openai/protocol.py +225 -37
 - sglang/srt/entrypoints/openai/serving_base.py +49 -2
 - sglang/srt/entrypoints/openai/serving_chat.py +29 -74
 - sglang/srt/entrypoints/openai/serving_classify.py +204 -0
 - sglang/srt/entrypoints/openai/serving_completions.py +15 -1
 - sglang/srt/entrypoints/openai/serving_responses.py +5 -2
 - sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
 - sglang/srt/environ.py +58 -6
 - sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
 - sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
 - sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
 - sglang/srt/eplb/expert_distribution.py +33 -4
 - sglang/srt/eplb/expert_location_dispatch.py +2 -2
 - sglang/srt/eplb/expert_location_updater.py +2 -2
 - sglang/srt/function_call/base_format_detector.py +17 -18
 - sglang/srt/function_call/function_call_parser.py +20 -14
 - sglang/srt/function_call/glm4_moe_detector.py +1 -5
 - sglang/srt/function_call/gpt_oss_detector.py +1 -1
 - sglang/srt/function_call/json_array_parser.py +0 -2
 - sglang/srt/function_call/minimax_m2.py +367 -0
 - sglang/srt/function_call/utils.py +2 -2
 - sglang/srt/grpc/compile_proto.py +3 -3
 - sglang/srt/{entrypoints → grpc}/grpc_request_manager.py +112 -52
 - sglang/srt/grpc/health_servicer.py +189 -0
 - sglang/srt/grpc/scheduler_launcher.py +181 -0
 - sglang/srt/grpc/sglang_scheduler_pb2.py +78 -70
 - sglang/srt/grpc/sglang_scheduler_pb2.pyi +66 -10
 - sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +89 -1
 - sglang/srt/layers/activation.py +10 -1
 - sglang/srt/layers/attention/aiter_backend.py +3 -3
 - sglang/srt/layers/attention/ascend_backend.py +17 -1
 - sglang/srt/layers/attention/attention_registry.py +43 -23
 - sglang/srt/layers/attention/base_attn_backend.py +20 -1
 - sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
 - sglang/srt/layers/attention/fla/chunk.py +0 -1
 - sglang/srt/layers/attention/fla/chunk_o.py +1 -1
 - sglang/srt/layers/attention/fla/index.py +0 -2
 - sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
 - sglang/srt/layers/attention/fla/utils.py +0 -3
 - sglang/srt/layers/attention/fla/wy_fast.py +0 -2
 - sglang/srt/layers/attention/flashattention_backend.py +24 -10
 - sglang/srt/layers/attention/flashinfer_backend.py +258 -22
 - sglang/srt/layers/attention/flashinfer_mla_backend.py +38 -28
 - sglang/srt/layers/attention/flashmla_backend.py +2 -2
 - sglang/srt/layers/attention/hybrid_attn_backend.py +1 -1
 - sglang/srt/layers/attention/hybrid_linear_attn_backend.py +165 -62
 - sglang/srt/layers/attention/intel_amx_backend.py +1 -1
 - sglang/srt/layers/attention/mamba/causal_conv1d.py +1 -1
 - sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +9 -5
 - sglang/srt/layers/attention/mamba/mamba.py +189 -241
 - sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
 - sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
 - sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +0 -50
 - sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +0 -60
 - sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +0 -111
 - sglang/srt/layers/attention/mamba/ops/ssd_combined.py +0 -1
 - sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +0 -11
 - sglang/srt/layers/attention/npu_ops/mla_preprocess.py +1 -1
 - sglang/srt/layers/attention/nsa/nsa_indexer.py +40 -83
 - sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
 - sglang/srt/layers/attention/nsa/utils.py +0 -1
 - sglang/srt/layers/attention/nsa_backend.py +404 -90
 - sglang/srt/layers/attention/triton_backend.py +208 -34
 - sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
 - sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
 - sglang/srt/layers/attention/trtllm_mha_backend.py +2 -2
 - sglang/srt/layers/attention/trtllm_mla_backend.py +362 -43
 - sglang/srt/layers/attention/utils.py +89 -7
 - sglang/srt/layers/attention/vision.py +3 -3
 - sglang/srt/layers/attention/xpu_backend.py +1028 -0
 - sglang/srt/layers/communicator.py +12 -7
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +5 -9
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/configurer.py +4 -3
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
 - sglang/srt/layers/dp_attention.py +17 -0
 - sglang/srt/layers/layernorm.py +64 -19
 - sglang/srt/layers/linear.py +9 -1
 - sglang/srt/layers/logits_processor.py +152 -17
 - sglang/srt/layers/modelopt_utils.py +11 -0
 - sglang/srt/layers/moe/cutlass_moe.py +0 -2
 - sglang/srt/layers/moe/cutlass_w4a8_moe.py +351 -21
 - sglang/srt/layers/moe/ep_moe/kernels.py +229 -457
 - sglang/srt/layers/moe/ep_moe/layer.py +154 -625
 - sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +1 -1
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +11 -3
 - sglang/srt/layers/moe/fused_moe_triton/layer.py +79 -73
 - sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +25 -46
 - sglang/srt/layers/moe/moe_runner/deep_gemm.py +569 -0
 - sglang/srt/layers/moe/moe_runner/runner.py +6 -0
 - sglang/srt/layers/moe/moe_runner/triton.py +3 -1
 - sglang/srt/layers/moe/moe_runner/triton_kernels.py +194 -0
 - sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
 - sglang/srt/layers/moe/router.py +51 -15
 - sglang/srt/layers/moe/token_dispatcher/__init__.py +14 -4
 - sglang/srt/layers/moe/token_dispatcher/base.py +12 -6
 - sglang/srt/layers/moe/token_dispatcher/deepep.py +127 -110
 - sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
 - sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
 - sglang/srt/layers/moe/topk.py +7 -6
 - sglang/srt/layers/moe/utils.py +20 -5
 - sglang/srt/layers/quantization/__init__.py +5 -58
 - sglang/srt/layers/quantization/awq.py +183 -9
 - sglang/srt/layers/quantization/awq_triton.py +29 -0
 - sglang/srt/layers/quantization/base_config.py +27 -1
 - sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
 - sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +20 -49
 - sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
 - sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +3 -0
 - sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
 - sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
 - sglang/srt/layers/quantization/fp8.py +152 -81
 - sglang/srt/layers/quantization/fp8_kernel.py +55 -10
 - sglang/srt/layers/quantization/fp8_utils.py +42 -14
 - sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
 - sglang/srt/layers/quantization/gguf.py +566 -0
 - sglang/srt/layers/quantization/gptq.py +0 -1
 - sglang/srt/layers/quantization/int8_kernel.py +18 -2
 - sglang/srt/layers/quantization/marlin_utils.py +12 -0
 - sglang/srt/layers/quantization/modelopt_quant.py +125 -100
 - sglang/srt/layers/quantization/mxfp4.py +35 -68
 - sglang/srt/layers/quantization/petit.py +1 -1
 - sglang/srt/layers/quantization/quark/quark.py +3 -1
 - sglang/srt/layers/quantization/quark/quark_moe.py +3 -3
 - sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
 - sglang/srt/layers/quantization/unquant.py +23 -48
 - sglang/srt/layers/quantization/utils.py +0 -1
 - sglang/srt/layers/quantization/w4afp8.py +87 -20
 - sglang/srt/layers/quantization/w8a8_int8.py +30 -24
 - sglang/srt/layers/radix_attention.py +62 -9
 - sglang/srt/layers/rotary_embedding.py +686 -17
 - sglang/srt/layers/sampler.py +47 -16
 - sglang/srt/layers/sparse_pooler.py +98 -0
 - sglang/srt/layers/utils.py +0 -1
 - sglang/srt/layers/vocab_parallel_embedding.py +4 -1
 - sglang/srt/lora/backend/triton_backend.py +0 -1
 - sglang/srt/lora/eviction_policy.py +139 -0
 - sglang/srt/lora/lora_manager.py +24 -9
 - sglang/srt/lora/lora_registry.py +1 -1
 - sglang/srt/lora/mem_pool.py +40 -16
 - sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +1 -1
 - sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +4 -2
 - sglang/srt/managers/cache_controller.py +48 -17
 - sglang/srt/managers/data_parallel_controller.py +146 -42
 - sglang/srt/managers/detokenizer_manager.py +40 -13
 - sglang/srt/managers/io_struct.py +69 -16
 - sglang/srt/managers/mm_utils.py +20 -18
 - sglang/srt/managers/multi_tokenizer_mixin.py +83 -82
 - sglang/srt/managers/overlap_utils.py +96 -19
 - sglang/srt/managers/schedule_batch.py +241 -511
 - sglang/srt/managers/schedule_policy.py +15 -2
 - sglang/srt/managers/scheduler.py +420 -514
 - sglang/srt/managers/scheduler_metrics_mixin.py +73 -18
 - sglang/srt/managers/scheduler_output_processor_mixin.py +317 -111
 - sglang/srt/managers/scheduler_pp_mixin.py +341 -0
 - sglang/srt/managers/scheduler_profiler_mixin.py +60 -14
 - sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
 - sglang/srt/managers/scheduler_update_weights_mixin.py +33 -14
 - sglang/srt/managers/tokenizer_communicator_mixin.py +71 -55
 - sglang/srt/managers/tokenizer_manager.py +375 -95
 - sglang/srt/managers/tp_worker.py +212 -161
 - sglang/srt/managers/utils.py +78 -2
 - sglang/srt/mem_cache/allocator.py +7 -2
 - sglang/srt/mem_cache/allocator_ascend.py +2 -2
 - sglang/srt/mem_cache/base_prefix_cache.py +2 -2
 - sglang/srt/mem_cache/chunk_cache.py +13 -2
 - sglang/srt/mem_cache/common.py +480 -0
 - sglang/srt/mem_cache/evict_policy.py +16 -1
 - sglang/srt/mem_cache/hicache_storage.py +11 -2
 - sglang/srt/mem_cache/hiradix_cache.py +16 -3
 - sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
 - sglang/srt/mem_cache/memory_pool.py +517 -219
 - sglang/srt/mem_cache/memory_pool_host.py +0 -1
 - sglang/srt/mem_cache/multimodal_cache.py +0 -1
 - sglang/srt/mem_cache/radix_cache.py +53 -19
 - sglang/srt/mem_cache/radix_cache_cpp.py +19 -14
 - sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +8 -2
 - sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +1 -13
 - sglang/srt/mem_cache/storage/backend_factory.py +2 -2
 - sglang/srt/mem_cache/storage/eic/eic_storage.py +5 -6
 - sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
 - sglang/srt/mem_cache/storage/hf3fs/mini_3fs_metadata_server.py +3 -2
 - sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +9 -3
 - sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +5 -3
 - sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +101 -17
 - sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
 - sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
 - sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
 - sglang/srt/mem_cache/swa_radix_cache.py +92 -26
 - sglang/srt/metrics/collector.py +31 -0
 - sglang/srt/metrics/func_timer.py +1 -1
 - sglang/srt/model_executor/cuda_graph_runner.py +43 -5
 - sglang/srt/model_executor/forward_batch_info.py +71 -25
 - sglang/srt/model_executor/model_runner.py +362 -270
 - sglang/srt/model_executor/npu_graph_runner.py +2 -3
 - sglang/srt/model_executor/piecewise_cuda_graph_runner.py +549 -0
 - sglang/srt/model_loader/__init__.py +1 -1
 - sglang/srt/model_loader/loader.py +424 -27
 - sglang/srt/model_loader/utils.py +0 -1
 - sglang/srt/model_loader/weight_utils.py +47 -28
 - sglang/srt/models/apertus.py +2 -3
 - sglang/srt/models/arcee.py +2 -2
 - sglang/srt/models/bailing_moe.py +13 -52
 - sglang/srt/models/bailing_moe_nextn.py +3 -4
 - sglang/srt/models/bert.py +1 -1
 - sglang/srt/models/deepseek_nextn.py +19 -3
 - sglang/srt/models/deepseek_ocr.py +1516 -0
 - sglang/srt/models/deepseek_v2.py +418 -140
 - sglang/srt/models/dots_ocr.py +0 -2
 - sglang/srt/models/dots_vlm.py +0 -1
 - sglang/srt/models/dots_vlm_vit.py +1 -1
 - sglang/srt/models/falcon_h1.py +13 -19
 - sglang/srt/models/gemma3_mm.py +16 -0
 - sglang/srt/models/gemma3n_mm.py +1 -2
 - sglang/srt/models/glm4_moe.py +327 -382
 - sglang/srt/models/glm4_moe_nextn.py +6 -16
 - sglang/srt/models/glm4v.py +2 -1
 - sglang/srt/models/glm4v_moe.py +32 -199
 - sglang/srt/models/gpt_oss.py +5 -5
 - sglang/srt/models/grok.py +10 -23
 - sglang/srt/models/hunyuan.py +2 -7
 - sglang/srt/models/interns1.py +0 -1
 - sglang/srt/models/kimi_vl.py +1 -7
 - sglang/srt/models/kimi_vl_moonvit.py +3 -1
 - sglang/srt/models/llama.py +2 -2
 - sglang/srt/models/llama_eagle3.py +1 -1
 - sglang/srt/models/longcat_flash.py +5 -22
 - sglang/srt/models/longcat_flash_nextn.py +3 -14
 - sglang/srt/models/mimo.py +2 -13
 - sglang/srt/models/mimo_mtp.py +1 -2
 - sglang/srt/models/minicpmo.py +7 -5
 - sglang/srt/models/minimax_m2.py +922 -0
 - sglang/srt/models/mixtral.py +1 -4
 - sglang/srt/models/mllama.py +1 -1
 - sglang/srt/models/mllama4.py +13 -3
 - sglang/srt/models/nemotron_h.py +511 -0
 - sglang/srt/models/nvila.py +355 -0
 - sglang/srt/models/nvila_lite.py +184 -0
 - sglang/srt/models/olmo2.py +31 -4
 - sglang/srt/models/opt.py +5 -5
 - sglang/srt/models/phi.py +1 -1
 - sglang/srt/models/phi4mm.py +1 -1
 - sglang/srt/models/phimoe.py +0 -1
 - sglang/srt/models/pixtral.py +0 -3
 - sglang/srt/models/points_v15_chat.py +186 -0
 - sglang/srt/models/qwen.py +0 -1
 - sglang/srt/models/qwen2.py +22 -1
 - sglang/srt/models/qwen2_5_vl.py +3 -3
 - sglang/srt/models/qwen2_audio.py +2 -15
 - sglang/srt/models/qwen2_moe.py +15 -12
 - sglang/srt/models/qwen2_vl.py +5 -2
 - sglang/srt/models/qwen3.py +34 -4
 - sglang/srt/models/qwen3_moe.py +19 -37
 - sglang/srt/models/qwen3_next.py +7 -12
 - sglang/srt/models/qwen3_next_mtp.py +3 -4
 - sglang/srt/models/qwen3_omni_moe.py +661 -0
 - sglang/srt/models/qwen3_vl.py +37 -33
 - sglang/srt/models/qwen3_vl_moe.py +57 -185
 - sglang/srt/models/roberta.py +55 -3
 - sglang/srt/models/sarashina2_vision.py +0 -1
 - sglang/srt/models/step3_vl.py +3 -5
 - sglang/srt/models/utils.py +11 -1
 - sglang/srt/multimodal/processors/base_processor.py +7 -2
 - sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
 - sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
 - sglang/srt/multimodal/processors/dots_vlm.py +0 -1
 - sglang/srt/multimodal/processors/glm4v.py +2 -6
 - sglang/srt/multimodal/processors/internvl.py +0 -2
 - sglang/srt/multimodal/processors/janus_pro.py +0 -1
 - sglang/srt/multimodal/processors/mllama4.py +0 -8
 - sglang/srt/multimodal/processors/{vila.py → nvila.py} +32 -24
 - sglang/srt/multimodal/processors/phi4mm.py +0 -1
 - sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
 - sglang/srt/multimodal/processors/qwen_vl.py +75 -16
 - sglang/srt/multimodal/processors/step3_vl.py +1 -1
 - sglang/srt/parser/conversation.py +41 -0
 - sglang/srt/parser/reasoning_parser.py +28 -2
 - sglang/srt/sampling/custom_logit_processor.py +77 -2
 - sglang/srt/sampling/sampling_batch_info.py +17 -22
 - sglang/srt/sampling/sampling_params.py +70 -2
 - sglang/srt/server_args.py +846 -163
 - sglang/srt/server_args_config_parser.py +1 -1
 - sglang/srt/single_batch_overlap.py +36 -31
 - sglang/srt/speculative/base_spec_worker.py +34 -0
 - sglang/srt/speculative/draft_utils.py +226 -0
 - sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +24 -7
 - sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +23 -2
 - sglang/srt/speculative/eagle_info.py +57 -18
 - sglang/srt/speculative/eagle_info_v2.py +458 -0
 - sglang/srt/speculative/eagle_utils.py +138 -0
 - sglang/srt/speculative/eagle_worker.py +83 -280
 - sglang/srt/speculative/eagle_worker_v2.py +702 -0
 - sglang/srt/speculative/{ngram_utils.py → ngram_info.py} +14 -9
 - sglang/srt/speculative/ngram_worker.py +12 -11
 - sglang/srt/speculative/spec_info.py +2 -0
 - sglang/srt/speculative/spec_utils.py +38 -3
 - sglang/srt/speculative/standalone_worker.py +4 -14
 - sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
 - sglang/srt/two_batch_overlap.py +28 -14
 - sglang/srt/utils/__init__.py +1 -1
 - sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
 - sglang/srt/utils/common.py +272 -82
 - sglang/srt/utils/hf_transformers_utils.py +44 -17
 - sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
 - sglang/srt/{offloader.py → utils/offloader.py} +4 -4
 - sglang/srt/utils/profile_merger.py +199 -0
 - sglang/test/attention/test_flashattn_backend.py +1 -1
 - sglang/test/attention/test_flashattn_mla_backend.py +0 -1
 - sglang/test/attention/test_prefix_chunk_info.py +0 -2
 - sglang/test/attention/test_trtllm_mla_backend.py +221 -53
 - sglang/test/few_shot_gsm8k_engine.py +2 -4
 - sglang/test/kit_matched_stop.py +157 -0
 - sglang/test/longbench_v2/__init__.py +1 -0
 - sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
 - sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
 - sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
 - sglang/test/run_eval.py +41 -0
 - sglang/test/runners.py +2 -0
 - sglang/test/send_one.py +42 -7
 - sglang/test/simple_eval_common.py +3 -0
 - sglang/test/simple_eval_gpqa.py +0 -1
 - sglang/test/simple_eval_humaneval.py +0 -3
 - sglang/test/simple_eval_longbench_v2.py +344 -0
 - sglang/test/test_block_fp8.py +1 -2
 - sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
 - sglang/test/test_cutlass_moe.py +1 -2
 - sglang/test/test_cutlass_w4a8_moe.py +10 -20
 - sglang/test/test_deterministic.py +463 -107
 - sglang/test/test_deterministic_utils.py +74 -0
 - sglang/test/test_disaggregation_utils.py +81 -0
 - sglang/test/test_marlin_moe.py +0 -1
 - sglang/test/test_utils.py +85 -20
 - sglang/version.py +1 -1
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/METADATA +48 -35
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/RECORD +414 -350
 - sglang/srt/layers/attention/mamba/mamba_utils.py +0 -81
 - sglang/srt/managers/tp_worker_overlap_thread.py +0 -311
 - sglang/srt/models/vila.py +0 -306
 - sglang/srt/speculative/build_eagle_tree.py +0 -427
 - sglang/test/test_block_fp8_ep.py +0 -358
 - /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
 - /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
 - /sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +0 -0
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/WHEEL +0 -0
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/licenses/LICENSE +0 -0
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/top_level.txt +0 -0
 
sglang/srt/speculative/eagle_utils.py (new file)

@@ -0,0 +1,138 @@
+import math
+from enum import IntEnum
+from typing import List, Optional
+
+import torch
+
+from sglang.srt.utils import is_cuda, is_hip
+
+if is_cuda() or is_hip():
+    from sgl_kernel import (
+        build_tree_kernel_efficient as sgl_build_tree_kernel_efficient,
+    )
+
+
+def organize_draft_results(
+    score_list: List[torch.Tensor],
+    token_list: List[torch.Tensor],
+    parents_list: List[torch.Tensor],
+    num_draft_token: int,
+):
+    score_list = torch.cat(score_list, dim=1).flatten(1)
+    ss_token_list = torch.cat(token_list, dim=1)
+    top_scores = torch.topk(score_list, num_draft_token - 1, dim=-1)
+    top_scores_index = top_scores.indices
+    top_scores_index = torch.sort(top_scores_index).values
+    draft_tokens = torch.gather(ss_token_list, index=top_scores_index, dim=1)
+
+    if len(parents_list) > 1:
+        parent_list = torch.cat(parents_list[:-1], dim=1)
+    else:
+        batch_size = parents_list[0].shape[0]
+        parent_list = torch.empty(batch_size, 0, device=parents_list[0].device)
+
+    return parent_list, top_scores_index, draft_tokens
+
+
+class TreeMaskMode(IntEnum):
+    FULL_MASK = 0
+    QLEN_ONLY = 1
+    QLEN_ONLY_BITPACKING = 2
+
+
+def build_tree_kernel_efficient(
+    verified_id: torch.Tensor,
+    parent_list: List[torch.Tensor],
+    top_scores_index: torch.Tensor,
+    draft_tokens: torch.Tensor,
+    seq_lens: torch.Tensor,
+    seq_lens_sum: int,
+    topk: int,
+    spec_steps: int,
+    num_verify_tokens: int,
+    tree_mask_mode: TreeMaskMode = TreeMaskMode.FULL_MASK,
+    tree_mask_buf: Optional[torch.Tensor] = None,
+    position_buf: Optional[torch.Tensor] = None,
+):
+    draft_tokens = torch.cat((verified_id.unsqueeze(1), draft_tokens), dim=1).flatten()
+
+    # seq_lens_sum == sum(seq_lens); seq_lens: sequence length without draft tokens
+    bs = seq_lens.numel()
+    device = seq_lens.device
+    # e.g. for bs=1, tree_mask: num_draft_token, seq_lens_sum + num_draft_token (flattened)
+    # where each row indicates the attending pattern of each draft token
+    # if use_partial_packed_tree_mask is True, tree_mask: num_draft_token (flattened, packed)
+    if tree_mask_buf is not None:
+        tree_mask = tree_mask_buf
+        if tree_mask_mode == TreeMaskMode.QLEN_ONLY:
+            tree_mask.fill_(True)
+        elif tree_mask_mode == TreeMaskMode.QLEN_ONLY_BITPACKING:
+            tree_mask.fill_(0)
+        elif tree_mask_mode == TreeMaskMode.FULL_MASK:
+            tree_mask.fill_(True)
+        else:
+            raise NotImplementedError(f"Invalid tree mask: {tree_mask_mode=}")
+    elif tree_mask_mode == TreeMaskMode.QLEN_ONLY:
+        tree_mask = torch.full(
+            (num_verify_tokens * bs * num_verify_tokens,),
+            True,
+            dtype=torch.bool,
+            device=device,
+        )
+    elif tree_mask_mode == TreeMaskMode.QLEN_ONLY_BITPACKING:
+        packed_dtypes = [torch.uint8, torch.uint16, torch.uint32]
+        packed_dtype_idx = int(math.ceil(math.log2((num_verify_tokens + 7) // 8)))
+        tree_mask = torch.zeros(
+            (num_verify_tokens * bs,),
+            dtype=packed_dtypes[packed_dtype_idx],
+            device=device,
+        )
+    elif tree_mask_mode == TreeMaskMode.FULL_MASK:
+        tree_mask = torch.full(
+            (
+                seq_lens_sum * num_verify_tokens
+                + num_verify_tokens * num_verify_tokens * bs,
+            ),
+            True,
+            device=device,
+        )
+    else:
+        raise NotImplementedError(f"Invalid tree mask: {tree_mask_mode=}")
+
+    # TODO: make them torch.empty and fuse them into `sgl_build_tree_kernel`
+    retrive_buf = torch.full(
+        (3, bs, num_verify_tokens), -1, device=device, dtype=torch.long
+    )
+    retrive_index, retrive_next_token, retrive_next_sibling = retrive_buf
+    # position: where each token belongs to
+    # e.g. if depth of each draft token is [0, 1, 1, 2] and the prompt length is 7
+    # then, positions = [7, 8, 8, 9]
+    if position_buf is not None:
+        positions = position_buf
+    else:
+        positions = torch.empty(
+            (bs * num_verify_tokens,), device=device, dtype=torch.long
+        )
+
+    sgl_build_tree_kernel_efficient(
+        parent_list,
+        top_scores_index,
+        seq_lens,
+        tree_mask,
+        positions,
+        retrive_index,
+        retrive_next_token,
+        retrive_next_sibling,
+        topk,
+        spec_steps,
+        num_verify_tokens,
+        tree_mask_mode,
+    )
+    return (
+        tree_mask,
+        positions,
+        retrive_index,
+        retrive_next_token,
+        retrive_next_sibling,
+        draft_tokens,
+    )
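The core of `organize_draft_results` is a three-step selection: flatten the per-step cumulative scores, take the top `num_draft_token - 1` candidates, and re-sort the winning indices ascending so the gathered tokens keep their tree (generation) order. The snippet below re-traces those ops on toy tensors; the shapes and values (bs=1, topk=2, two draft steps) are invented for illustration, and it deliberately re-implements the ops instead of importing sglang.

```python
import torch

# Toy inputs: batch size 1, topk=2, two draft steps.
# score_list[i] holds cumulative scores for the candidates proposed at step i.
score_list = [
    torch.tensor([[[0.90, 0.60]]]),                # step 0: (bs, 1, topk)
    torch.tensor([[[0.50, 0.40], [0.30, 0.10]]]),  # step 1: (bs, topk, topk)
]
token_list = [
    torch.tensor([[11, 12]]),
    torch.tensor([[21, 22, 23, 24]]),
]

scores = torch.cat(score_list, dim=1).flatten(1)  # (1, 6) cumulative scores
tokens = torch.cat(token_list, dim=1)             # (1, 6) candidate token ids

num_draft_token = 4  # keep the 3 best candidates; the verified root is prepended later
top_scores_index = torch.topk(scores, num_draft_token - 1, dim=-1).indices
top_scores_index = torch.sort(top_scores_index).values  # restore proposal order
draft_tokens = torch.gather(tokens, dim=1, index=top_scores_index)

print(top_scores_index)  # tensor([[0, 1, 2]])
print(draft_tokens)      # tensor([[11, 12, 21]])
```

The ascending re-sort matters: `build_tree_kernel_efficient` consumes `top_scores_index` together with `parent_list` to rebuild parent/child links, which appears to assume candidates arrive in the same order in which they were proposed.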
sglang/srt/speculative/eagle_worker.py

@@ -1,33 +1,27 @@
 import logging
-import os
 import time
-from contextlib import contextmanager
 from typing import List, Optional, Tuple

 import torch
-from huggingface_hub import snapshot_download

-from sglang.srt.distributed import (
-    GroupCoordinator,
-    get_tp_group,
-    patch_tensor_parallel_group,
-)
+from sglang.srt.distributed import get_tp_group
 from sglang.srt.layers.logits_processor import LogitsProcessorOutput
 from sglang.srt.layers.sampler import get_token_ids_logprobs, get_top_logprobs
-from sglang.srt.managers.schedule_batch import (
-    ScheduleBatch,
+from sglang.srt.managers.schedule_batch import ScheduleBatch
+from sglang.srt.managers.scheduler import GenerationBatchResult
+from sglang.srt.managers.tp_worker import TpModelWorker
+from sglang.srt.mem_cache.common import (
+    alloc_paged_token_slots_extend,
+    alloc_token_slots,
     get_last_loc,
-    global_server_args_dict,
 )
-from sglang.srt.managers.tp_worker import TpModelWorker
 from sglang.srt.model_executor.forward_batch_info import (
     CaptureHiddenMode,
     ForwardBatch,
-    ForwardBatchOutput,
     ForwardMode,
 )
 from sglang.srt.server_args import ServerArgs
-from sglang.srt.speculative.build_eagle_tree import build_tree_kernel_efficient
+from sglang.srt.speculative.draft_utils import DraftBackendFactory
 from sglang.srt.speculative.eagle_draft_cuda_graph_runner import (
     EAGLEDraftCudaGraphRunner,
 )
@@ -39,35 +33,33 @@ from sglang.srt.speculative.eagle_info import (
     EagleVerifyInput,
     EagleVerifyOutput,
 )
+from sglang.srt.speculative.eagle_utils import (
+    build_tree_kernel_efficient,
+    organize_draft_results,
+)
 from sglang.srt.speculative.spec_info import SpeculativeAlgorithm
 from sglang.srt.speculative.spec_utils import (
     assign_draft_cache_locs,
+    detect_nan,
+    draft_tp_context,
     fast_topk,
     generate_token_bitmask,
+    load_token_map,
     select_top_k_tokens,
 )
 from sglang.srt.utils import (
     empty_context,
     get_available_gpu_memory,
     get_bool_env_var,
-    is_blackwell,
     is_cuda,
     next_power_of_2,
 )

 if is_cuda():
-    from sgl_kernel import segment_packbits
+    from sgl_kernel import segment_packbits  # noqa: F401

 logger = logging.getLogger(__name__)
-
-
-
-@contextmanager
-def draft_tp_context(tp_group: GroupCoordinator):
-    # Draft model doesn't use dp and has its own tp group.
-    # We disable mscclpp now because it doesn't support 2 comm groups.
-    with patch_tensor_parallel_group(tp_group):
-        yield
+SGLANG_RETURN_ORIGINAL_LOGPROB = get_bool_env_var("SGLANG_RETURN_ORIGINAL_LOGPROB")


 class EAGLEWorker(TpModelWorker):
@@ -95,7 +87,6 @@ class EAGLEWorker(TpModelWorker):
         self.speculative_algorithm = SpeculativeAlgorithm.from_string(
             server_args.speculative_algorithm
         )
-        self.padded_static_len = -1

         # Override the context length of the draft model to be the same as the target model.
         server_args.context_length = target_worker.model_runner.model_config.context_len
@@ -187,208 +178,22 @@ class EAGLEWorker(TpModelWorker):

     def init_attention_backend(self):
         # Create multi-step attn backends and cuda graph runners
-
-
-
+        draft_backend_factory = DraftBackendFactory(
+            self.server_args,
+            self.draft_model_runner,
+            self.topk,
+            self.speculative_num_steps,
+        )

         # Initialize decode attention backend
-        self.draft_attn_backend = self._create_decode_backend()
+        self.draft_attn_backend = draft_backend_factory.create_decode_backend()

         # Initialize draft extend attention backend (respects speculative_attention_mode setting)
-        self.draft_extend_attn_backend = self._create_draft_extend_backend()
-
-        self.draft_model_runner.draft_attn_backend = self.draft_attn_backend
-
-    def _create_backend(
-        self, backend_name: str, backend_map: dict, error_template: str
-    ):
-        backend_type = getattr(self.server_args, backend_name)
-        if backend_type is None:
-            backend_type = self.server_args.attention_backend
-
-        if backend_type not in backend_map:
-            raise ValueError(error_template.format(backend_type=backend_type))
-
-        return backend_map[backend_type]()
-
-    def _create_decode_backend(self):
-        backend_map = {
-            "flashinfer": self._create_flashinfer_decode_backend,
-            "triton": self._create_triton_decode_backend,
-            "aiter": self._create_aiter_decode_backend,
-            "fa3": self._create_fa3_decode_backend,
-            "hybrid_linear_attn": (
-                self._create_fa3_decode_backend
-                if not is_blackwell()
-                else self._create_triton_decode_backend
-            ),
-            "flashmla": self._create_flashmla_decode_backend,
-            "trtllm_mha": self._create_trtllm_mha_decode_backend,
-            "trtllm_mla": self._create_trtllm_mla_decode_backend,
-        }
-
-        return self._create_backend(
-            "decode_attention_backend",
-            backend_map,
-            "EAGLE is not supported in decode attention backend {backend_type}",
-        )
-
-    def _create_draft_extend_backend(self):
-        backend_map = {
-            "flashinfer": self._create_flashinfer_prefill_backend,
-            "triton": self._create_triton_prefill_backend,
-            "aiter": self._create_aiter_prefill_backend,
-            "fa3": self._create_fa3_prefill_backend,
-            "hybrid_linear_attn": (
-                self._create_fa3_prefill_backend
-                if not is_blackwell()
-                else self._create_triton_prefill_backend
-            ),
-            "flashmla": self._create_flashmla_prefill_backend,
-            "trtllm_mha": self._create_trtllm_mha_prefill_backend,
-            "trtllm_mla": self._create_trtllm_mla_prefill_backend,
-        }
-        backend_name = (
-            "decode_attention_backend"
-            if self.server_args.speculative_attention_mode == "decode"
-            else "prefill_attention_backend"
-        )
-        return self._create_backend(
-            backend_name,
-            backend_map,
-            "EAGLE is not supported in attention backend {backend_type}",
-        )
-
-    def _create_flashinfer_decode_backend(self):
-        if not global_server_args_dict["use_mla_backend"]:
-            from sglang.srt.layers.attention.flashinfer_backend import (
-                FlashInferMultiStepDraftBackend,
-            )
-
-            self.has_prefill_wrapper_verify = True
-            return FlashInferMultiStepDraftBackend(
-                self.draft_model_runner, self.topk, self.speculative_num_steps
-            )
-        else:
-            from sglang.srt.layers.attention.flashinfer_mla_backend import (
-                FlashInferMLAMultiStepDraftBackend,
-            )
-
-            self.has_prefill_wrapper_verify = True
-            return FlashInferMLAMultiStepDraftBackend(
-                self.draft_model_runner, self.topk, self.speculative_num_steps
                        )
         
     | 
| 
       281 
     | 
    
         
            -
             
     | 
| 
       282 
     | 
    
         
            -
                def _create_triton_decode_backend(self):
         
     | 
| 
       283 
     | 
    
         
            -
                    from sglang.srt.layers.attention.triton_backend import (
         
     | 
| 
       284 
     | 
    
         
            -
                        TritonMultiStepDraftBackend,
         
     | 
| 
       285 
     | 
    
         
            -
                    )
         
     | 
| 
       286 
     | 
    
         
            -
             
     | 
| 
       287 
     | 
    
         
            -
                    return TritonMultiStepDraftBackend(
         
     | 
| 
       288 
     | 
    
         
            -
                        self.draft_model_runner, self.topk, self.speculative_num_steps
         
     | 
| 
       289 
     | 
    
         
            -
                    )
         
     | 
| 
       290 
     | 
    
         
            -
             
     | 
| 
       291 
     | 
    
         
            -
                def _create_aiter_decode_backend(self):
         
     | 
| 
       292 
     | 
    
         
            -
                    from sglang.srt.layers.attention.aiter_backend import AiterMultiStepDraftBackend
         
     | 
| 
       293 
     | 
    
         
            -
             
     | 
| 
       294 
     | 
    
         
            -
                    return AiterMultiStepDraftBackend(
         
     | 
| 
       295 
     | 
    
         
            -
                        self.draft_model_runner, self.topk, self.speculative_num_steps
         
     | 
| 
       296 
     | 
    
         
            -
                    )
         
     | 
| 
       297 
     | 
    
         
            -
             
     | 
| 
       298 
     | 
    
         
            -
                def _create_fa3_decode_backend(self):
         
     | 
| 
       299 
     | 
    
         
            -
                    from sglang.srt.layers.attention.flashattention_backend import (
         
     | 
| 
       300 
     | 
    
         
            -
                        FlashAttentionMultiStepBackend,
         
     | 
| 
       301 
     | 
    
         
            -
                    )
         
     | 
| 
       302 
     | 
    
         
            -
             
     | 
| 
       303 
     | 
    
         
            -
                    return FlashAttentionMultiStepBackend(
         
     | 
| 
       304 
     | 
    
         
            -
                        self.draft_model_runner, self.topk, self.speculative_num_steps
         
     | 
| 
       305 
     | 
    
         
            -
                    )
         
     | 
| 
       306 
     | 
    
         
            -
             
     | 
| 
       307 
     | 
    
         
            -
                def _create_flashmla_decode_backend(self):
         
     | 
| 
       308 
     | 
    
         
            -
                    from sglang.srt.layers.attention.flashmla_backend import (
         
     | 
| 
       309 
     | 
    
         
            -
                        FlashMLAMultiStepDraftBackend,
         
     | 
| 
       310 
     | 
    
         
            -
                    )
         
     | 
| 
       311 
     | 
    
         
            -
             
     | 
| 
       312 
     | 
    
         
            -
                    return FlashMLAMultiStepDraftBackend(
         
     | 
| 
       313 
     | 
    
         
            -
                        self.draft_model_runner, self.topk, self.speculative_num_steps
         
     | 
| 
       314 
     | 
    
         
            -
                    )
         
     | 
| 
       315 
     | 
    
         
            -
             
     | 
| 
       316 
     | 
    
         
            -
                def _create_trtllm_mha_decode_backend(self):
         
     | 
| 
       317 
     | 
    
         
            -
                    from sglang.srt.layers.attention.trtllm_mha_backend import (
         
     | 
| 
       318 
     | 
    
         
            -
                        TRTLLMHAAttnMultiStepDraftBackend,
         
     | 
| 
       319 
     | 
    
         
            -
                    )
         
     | 
| 
       320 
     | 
    
         
            -
             
     | 
| 
       321 
     | 
    
         
            -
                    self.has_prefill_wrapper_verify = True
         
     | 
| 
       322 
     | 
    
         
            -
                    return TRTLLMHAAttnMultiStepDraftBackend(
         
     | 
| 
       323 
     | 
    
         
            -
                        self.draft_model_runner, self.topk, self.speculative_num_steps
         
     | 
| 
       324 
     | 
    
         
            -
                    )
         
     | 
| 
       325 
     | 
    
         
            -
             
     | 
| 
       326 
     | 
    
         
            -
                def _create_trtllm_mla_decode_backend(self):
         
     | 
| 
       327 
     | 
    
         
            -
                    if not global_server_args_dict["use_mla_backend"]:
         
     | 
| 
       328 
     | 
    
         
            -
                        raise ValueError(
         
     | 
| 
       329 
     | 
    
         
            -
                            "trtllm_mla backend requires MLA model (use_mla_backend=True)."
         
     | 
| 
       330 
     | 
    
         
            -
                        )
         
     | 
| 
       331 
     | 
    
         
            -
             
     | 
| 
       332 
     | 
    
         
            -
                    from sglang.srt.layers.attention.trtllm_mla_backend import (
         
     | 
| 
       333 
     | 
    
         
            -
                        TRTLLMMLAMultiStepDraftBackend,
         
     | 
| 
       334 
     | 
    
         
            -
                    )
         
     | 
| 
       335 
     | 
    
         
            -
             
     | 
| 
       336 
     | 
    
         
            -
                    self.has_prefill_wrapper_verify = True
         
     | 
| 
       337 
     | 
    
         
            -
                    return TRTLLMMLAMultiStepDraftBackend(
         
     | 
| 
       338 
     | 
    
         
            -
                        self.draft_model_runner, self.topk, self.speculative_num_steps
         
     | 
| 
       339 
     | 
    
         
            -
                    )
         
     | 
| 
       340 
     | 
    
         
            -
             
     | 
| 
       341 
     | 
    
         
            -
                def _create_flashinfer_prefill_backend(self):
         
     | 
| 
       342 
     | 
    
         
            -
                    if not global_server_args_dict["use_mla_backend"]:
         
     | 
| 
       343 
     | 
    
         
            -
                        from sglang.srt.layers.attention.flashinfer_backend import (
         
     | 
| 
       344 
     | 
    
         
            -
                            FlashInferAttnBackend,
         
     | 
| 
       345 
     | 
    
         
            -
                        )
         
     | 
| 
       346 
     | 
    
         
            -
             
     | 
| 
       347 
     | 
    
         
            -
                        return FlashInferAttnBackend(self.draft_model_runner, skip_prefill=False)
         
     | 
| 
       348 
     | 
    
         
            -
                    else:
         
     | 
| 
       349 
     | 
    
         
            -
                        from sglang.srt.layers.attention.flashinfer_mla_backend import (
         
     | 
| 
       350 
     | 
    
         
            -
                            FlashInferMLAAttnBackend,
         
     | 
| 
       351 
     | 
    
         
            -
                        )
         
     | 
| 
       352 
     | 
    
         
            -
             
     | 
| 
       353 
     | 
    
         
            -
                        return FlashInferMLAAttnBackend(self.draft_model_runner, skip_prefill=False)
         
     | 
| 
       354 
     | 
    
         
            -
             
     | 
| 
       355 
     | 
    
         
            -
                def _create_triton_prefill_backend(self):
         
     | 
| 
       356 
     | 
    
         
            -
                    from sglang.srt.layers.attention.triton_backend import TritonAttnBackend
         
     | 
| 
       357 
     | 
    
         
            -
             
     | 
| 
       358 
     | 
    
         
            -
                    return TritonAttnBackend(self.draft_model_runner, skip_prefill=False)
         
     | 
| 
       359 
     | 
    
         
            -
             
     | 
| 
       360 
     | 
    
         
            -
                def _create_aiter_prefill_backend(self):
         
     | 
| 
       361 
     | 
    
         
            -
                    from sglang.srt.layers.attention.aiter_backend import AiterAttnBackend
         
     | 
| 
       362 
     | 
    
         
            -
             
     | 
| 
       363 
     | 
    
         
            -
                    return AiterAttnBackend(self.draft_model_runner, skip_prefill=False)
         
     | 
| 
       364 
     | 
    
         
            -
             
     | 
| 
       365 
     | 
    
         
            -
                def _create_fa3_prefill_backend(self):
         
     | 
| 
       366 
     | 
    
         
            -
                    from sglang.srt.layers.attention.flashattention_backend import (
         
     | 
| 
       367 
     | 
    
         
            -
                        FlashAttentionBackend,
         
     | 
| 
      
 192 
     | 
    
         
            +
                    self.draft_extend_attn_backend = (
         
     | 
| 
      
 193 
     | 
    
         
            +
                        draft_backend_factory.create_draft_extend_backend()
         
     | 
| 
       368 
194 
     | 
    
         
             
                    )
         
     | 
| 
       369 
195 
     | 
    
         | 
| 
       370 
     | 
    
         
            -
                     
     | 
| 
       371 
     | 
    
         
            -
             
     | 
| 
       372 
     | 
    
         
            -
                def _create_trtllm_mha_prefill_backend(self):
         
     | 
| 
       373 
     | 
    
         
            -
                    from sglang.srt.layers.attention.trtllm_mha_backend import TRTLLMHAAttnBackend
         
     | 
| 
       374 
     | 
    
         
            -
             
     | 
| 
       375 
     | 
    
         
            -
                    return TRTLLMHAAttnBackend(self.draft_model_runner, skip_prefill=False)
         
     | 
| 
       376 
     | 
    
         
            -
             
     | 
| 
       377 
     | 
    
         
            -
                def _create_trtllm_mla_prefill_backend(self):
         
     | 
| 
       378 
     | 
    
         
            -
                    if not global_server_args_dict["use_mla_backend"]:
         
     | 
| 
       379 
     | 
    
         
            -
                        raise ValueError(
         
     | 
| 
       380 
     | 
    
         
            -
                            "trtllm_mla backend requires MLA model (use_mla_backend=True)."
         
     | 
| 
       381 
     | 
    
         
            -
                        )
         
     | 
| 
       382 
     | 
    
         
            -
             
     | 
| 
       383 
     | 
    
         
            -
                    from sglang.srt.layers.attention.trtllm_mla_backend import TRTLLMMLABackend
         
     | 
| 
       384 
     | 
    
         
            -
             
     | 
| 
       385 
     | 
    
         
            -
                    return TRTLLMMLABackend(self.draft_model_runner, skip_prefill=False)
         
     | 
| 
       386 
     | 
    
         
            -
             
     | 
| 
       387 
     | 
    
         
            -
                def _create_flashmla_prefill_backend(self):
         
     | 
| 
       388 
     | 
    
         
            -
                    logger.warning(
         
     | 
| 
       389 
     | 
    
         
            -
                        "flashmla prefill backend is not yet supported for draft extend."
         
     | 
| 
       390 
     | 
    
         
            -
                    )
         
     | 
| 
       391 
     | 
    
         
            -
                    return None
         
     | 
| 
      
 196 
     | 
    
         
            +
                    self.draft_model_runner.draft_attn_backend = self.draft_attn_backend
         
     | 
| 
       392 
197 
     | 
    
         | 
| 
       393 
198 
     | 
    
         
             
                def init_cuda_graphs(self):
         
     | 
| 
       394 
199 
     | 
    
         
             
                    """Capture cuda graphs."""
         
     | 
| 
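The block above removes EAGLEWorker's per-backend `_create_*` helpers; backend construction now goes through a `draft_backend_factory` (see the `+` lines). A minimal sketch of the dispatch-table pattern involved, assuming hypothetical class and constructor names; only the map-plus-error-template shape mirrors the removed code:

```python
from typing import Callable, Dict


class DraftBackendFactory:
    """Hypothetical sketch: the class name, constructor lambdas, and
    backend strings are illustrative, not the actual sglang factory."""

    def __init__(self, backend_name: str):
        self.backend_name = backend_name

    def _create_backend(
        self,
        arg_name: str,
        backend_map: Dict[str, Callable[[], object]],
        error_template: str,
    ) -> object:
        ctor = backend_map.get(self.backend_name)
        if ctor is None:
            # Same error-template shape as the removed code above.
            raise ValueError(error_template.format(backend_type=self.backend_name))
        return ctor()

    def create_decode_backend(self) -> object:
        backend_map = {
            "triton": lambda: "TritonMultiStepDraftBackend(...)",
            "flashinfer": lambda: "FlashInferMultiStepDraftBackend(...)",
        }
        return self._create_backend(
            "decode_attention_backend",
            backend_map,
            "EAGLE is not supported in decode attention backend {backend_type}",
        )


print(DraftBackendFactory("triton").create_decode_backend())
```

Centralizing the table in one factory removes the dozen near-identical `_create_*` methods that the worker previously carried.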
@@ -399,16 +204,17 @@ class EAGLEWorker(TpModelWorker):
             return

         # Capture draft
-        tic = time.perf_counter()
-        before_mem = get_available_gpu_memory(self.device, self.gpu_id)
-        logger.info(
-            f"Capture draft cuda graph begin. This can take up to several minutes. avail mem={before_mem:.2f} GB"
-        )
-        self.cuda_graph_runner = EAGLEDraftCudaGraphRunner(self)
-        after_mem = get_available_gpu_memory(self.device, self.gpu_id)
-        logger.info(
-            f"Capture draft cuda graph end. Time elapsed: {time.perf_counter() - tic:.2f} s. mem usage={(before_mem - after_mem):.2f} GB. avail mem={after_mem:.2f} GB."
-        )
+        if self.speculative_num_steps > 1:
+            tic = time.perf_counter()
+            before_mem = get_available_gpu_memory(self.device, self.gpu_id)
+            logger.info(
+                f"Capture draft cuda graph begin. This can take up to several minutes. avail mem={before_mem:.2f} GB"
+            )
+            self.cuda_graph_runner = EAGLEDraftCudaGraphRunner(self)
+            after_mem = get_available_gpu_memory(self.device, self.gpu_id)
+            logger.info(
+                f"Capture draft cuda graph end. Time elapsed: {time.perf_counter() - tic:.2f} s. mem usage={(before_mem - after_mem):.2f} GB. avail mem={after_mem:.2f} GB."
+            )

         # Capture extend
         if self.draft_extend_attn_backend:
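The hunk above wraps draft CUDA-graph capture in a `speculative_num_steps > 1` guard while keeping the begin/end logging with elapsed time and memory deltas. A sketch of the same guard-and-log pattern, with the capture function and memory probe passed in as assumptions rather than sglang's actual objects:

```python
import logging
import time

logger = logging.getLogger(__name__)


def capture_draft_graph(capture_fn, get_avail_mem, device, gpu_id, num_steps):
    """Sketch of the guarded capture-and-log pattern from the hunk above.

    `capture_fn` and `get_avail_mem` stand in for EAGLEDraftCudaGraphRunner
    and sglang's get_available_gpu_memory; for num_steps <= 1 the new code
    skips draft graph capture entirely.
    """
    if num_steps <= 1:
        return None
    tic = time.perf_counter()
    before_mem = get_avail_mem(device, gpu_id)
    logger.info("Capture draft cuda graph begin. avail mem=%.2f GB", before_mem)
    runner = capture_fn()
    after_mem = get_avail_mem(device, gpu_id)
    logger.info(
        "Capture draft cuda graph end. Time elapsed: %.2f s. mem usage=%.2f GB. avail mem=%.2f GB.",
        time.perf_counter() - tic,
        before_mem - after_mem,
        after_mem,
    )
    return runner
```

Logging available memory before and after capture makes graph-capture overhead visible without a profiler attached.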
@@ -429,7 +235,7 @@ class EAGLEWorker(TpModelWorker):
     def draft_model_runner(self):
         return self.model_runner

-    def forward_batch_generation(self, batch: ScheduleBatch) -> …:
+    def forward_batch_generation(self, batch: ScheduleBatch) -> GenerationBatchResult:
         """Run speculative decoding forward.

         NOTE: Many states of batch is modified as you go through. It is not guaranteed that
@@ -449,7 +255,7 @@ class EAGLEWorker(TpModelWorker):
             self.forward_draft_extend(
                 batch, logits_output.hidden_states, next_token_ids, seq_lens_cpu
             )
-            return …(
+            return GenerationBatchResult(
                 logits_output=logits_output,
                 next_token_ids=next_token_ids,
                 num_accepted_tokens=0,
@@ -472,7 +278,7 @@ class EAGLEWorker(TpModelWorker):
                 # decode is not finished
                 self.forward_draft_extend_after_decode(batch)

-            return …(
+            return GenerationBatchResult(
                 logits_output=logits_output,
                 next_token_ids=verify_output.verified_id,
                 num_accepted_tokens=sum(verify_output.accept_length_per_req_cpu),
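Both early-return paths above now package their outputs in a `GenerationBatchResult`. A sketch of such a container, using only the fields visible at the call sites in this diff (`logits_output`, `next_token_ids`, `num_accepted_tokens`, plus `can_run_cuda_graph` from a later hunk); the real class is defined elsewhere in sglang and may carry more fields:

```python
from dataclasses import dataclass
from typing import Any

import torch


@dataclass
class GenerationBatchResultSketch:
    # Fields inferred from the call sites in this diff; the real
    # GenerationBatchResult may define more.
    logits_output: Any
    next_token_ids: torch.Tensor
    num_accepted_tokens: int = 0
    can_run_cuda_graph: bool = False


result = GenerationBatchResultSketch(
    logits_output=None,
    next_token_ids=torch.tensor([1, 2, 3]),
)
print(result.num_accepted_tokens, result.can_run_cuda_graph)
```

A single result object replaces the positional tuples the old code returned, which is why later hunks switch from tuple unpacking to attribute access.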
@@ -513,12 +319,10 @@ class EAGLEWorker(TpModelWorker):
         # We need the full hidden states to prefill the KV cache of the draft model.
         model_worker_batch = batch.get_model_worker_batch()
         model_worker_batch.capture_hidden_mode = CaptureHiddenMode.FULL
-        … = self.target_worker.forward_batch_generation(
-            model_worker_batch
-        )
+        batch_result = self.target_worker.forward_batch_generation(model_worker_batch)
         logits_output, next_token_ids = (
-            …,
-            …,
+            batch_result.logits_output,
+            batch_result.next_token_ids,
         )
         return (
             logits_output,
@@ -543,8 +347,10 @@ class EAGLEWorker(TpModelWorker):
         # [       topk 0         ] [       topk 1         ]
         # [iter=0, iter=1, iter=2] [iter=0, iter=1, iter=2]
         if self.page_size == 1:
-            out_cache_loc, token_to_kv_pool_state_backup = …(
-                …
+            out_cache_loc, token_to_kv_pool_state_backup = alloc_token_slots(
+                batch.tree_cache,
+                num_seqs * self.speculative_num_steps * self.topk,
+                backup_state=True,
             )
         else:
             if self.topk == 1:
@@ -603,7 +409,8 @@ class EAGLEWorker(TpModelWorker):
                 extend_num_tokens = torch.sum((seq_lens_cpu - prefix_lens_cpu)).item()

             out_cache_loc, token_to_kv_pool_state_backup = (
-                …(
+                alloc_paged_token_slots_extend(
+                    batch.tree_cache,
                     prefix_lens,
                     prefix_lens_cpu,
                     seq_lens,
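The two hunks above route speculative KV-slot allocation through the free functions `alloc_token_slots` and `alloc_paged_token_slots_extend`, passing `batch.tree_cache` and requesting `backup_state=True` so the allocator can be rolled back after verification. A toy allocator illustrating the backup/restore shape; this is not sglang's allocator, only the call signature mirrors the diff:

```python
import copy
from typing import List


class TinyAllocator:
    """Toy free-list allocator; only the alloc(..., backup_state=True)
    -> (slots, backup) shape mirrors the calls above."""

    def __init__(self, num_slots: int):
        self.free_slots: List[int] = list(range(num_slots))

    def alloc(self, n: int, backup_state: bool = False):
        backup = copy.copy(self.free_slots) if backup_state else None
        slots, self.free_slots = self.free_slots[:n], self.free_slots[n:]
        return (slots, backup) if backup_state else slots

    def restore(self, backup: List[int]) -> None:
        self.free_slots = backup


alloc = TinyAllocator(16)
# Sized like num_seqs * speculative_num_steps * topk in the hunk above.
slots, backup = alloc.alloc(2 * 3 * 2, backup_state=True)
alloc.restore(backup)  # roll the speculative slots back after verify
```

Backing up allocator state lets the worker hand out slots for every draft step speculatively and discard the ones rejected at verification without leaking KV pages.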
@@ -675,16 +482,21 @@ class EAGLEWorker(TpModelWorker):
             forward_batch
         )
         if can_cuda_graph:
-            … = self.cuda_graph_runner.replay(
+            parent_list, top_scores_index, draft_tokens = self.cuda_graph_runner.replay(
                 forward_batch
             )
         else:
             forward_batch.can_run_dp_cuda_graph = False
-            if …:
-                …
+            if (
+                not forward_batch.forward_mode.is_idle()
+                and self.speculative_num_steps > 1
+            ):
+                # Skip attention backend init for idle mode or 1-step draft
                 self.draft_attn_backend.init_forward_metadata(forward_batch)
             # Run forward steps
-            … = self.draft_forward(forward_batch)
+            parent_list, top_scores_index, draft_tokens = self.draft_forward(
+                forward_batch
+            )

         if batch.forward_mode.is_idle():
             return EagleVerifyInput.create_idle_input(
@@ -702,9 +514,9 @@ class EAGLEWorker(TpModelWorker):
             draft_tokens,
         ) = build_tree_kernel_efficient(
             spec_info.verified_id,
-            score_list,
-            token_list,
-            parents_list,
+            parent_list,
+            top_scores_index,
+            draft_tokens,
             batch.seq_lens,
             batch.seq_lens_sum,
             self.topk,
@@ -786,18 +598,23 @@ class EAGLEWorker(TpModelWorker):
             logits_output, _ = self.draft_model_runner.forward(
                 forward_batch, skip_attn_backend_init=True
             )
-            self._detect_nan_if_needed(logits_output)
+            if self.server_args.enable_nan_detection:
+                detect_nan(logits_output)
             probs = torch.softmax(logits_output.next_token_logits, dim=-1)
             topk_p, topk_index = fast_topk(probs, self.topk, dim=-1)
             if self.hot_token_id is not None:
                 topk_index = self.hot_token_id[topk_index]
             hidden_states = logits_output.hidden_states

-        return score_list, token_list, parents_list
+        parent_list, top_scores_index, draft_tokens = organize_draft_results(
+            score_list, token_list, parents_list, self.speculative_num_draft_tokens
+        )
+
+        return parent_list, top_scores_index, draft_tokens

     def clear_cache_pool(self):
-        …
-        …
+        # allocator and kv cache pool are shared with target worker
+        pass

     def verify(self, batch: ScheduleBatch, spec_info: EagleVerifyInput):
         spec_info.prepare_for_verify(batch, self.page_size)
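Draft results are now normalized through `organize_draft_results(...)` before tree building, and the inline `self._detect_nan_if_needed(...)` calls become a guard plus a shared `detect_nan` helper. Since the removed method's body survives in the final hunk below, a free-function version of the same check is a faithful sketch, though sglang's shared helper may differ in detail:

```python
import logging

import torch

logger = logging.getLogger(__name__)


def detect_nan(logits_output) -> None:
    # Same check the removed _detect_nan_if_needed performed (see the
    # final hunk below), lifted out of the worker class.
    logits = logits_output.next_token_logits
    if torch.any(torch.isnan(logits)):
        logger.error("Detected errors during sampling! NaN in the logits.")
        raise ValueError("Detected errors during sampling! NaN in the logits.")
```

Hoisting the `enable_nan_detection` check to the call site keeps the helper stateless, so the same function can serve both the draft and target workers.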
@@ -822,12 +639,12 @@ class EAGLEWorker(TpModelWorker):
         ).cpu()

         # Forward
-        … = self.target_worker.forward_batch_generation(
+        batch_result = self.target_worker.forward_batch_generation(
             model_worker_batch, is_verify=True
         )
         logits_output, can_run_cuda_graph = (
-            …,
-            …,
+            batch_result.logits_output,
+            batch_result.can_run_cuda_graph,
         )

         vocab_mask = None
@@ -850,7 +667,9 @@ class EAGLEWorker(TpModelWorker):
                 # and will be applied to produce wrong results
                 batch.sampling_info.vocab_mask = None

-        self._detect_nan_if_needed(logits_output)
+        if self.enable_nan_detection:
+            detect_nan(logits_output)
+
         spec_info.hidden_states = logits_output.hidden_states
         res: EagleVerifyOutput = spec_info.verify(
             batch,
@@ -868,7 +687,7 @@ class EAGLEWorker(TpModelWorker):
         logits_output.hidden_states = logits_output.hidden_states[res.accepted_indices]

         # QQ: can be optimized
-        if self.target_worker.model_runner.…:
+        if self.target_worker.model_runner.hybrid_gdn_config is not None:
             # res.draft_input.accept_length is on GPU but may be empty for last verify?
             accepted_length = (
                 torch.tensor(
@@ -911,7 +730,7 @@ class EAGLEWorker(TpModelWorker):
         # acceptance indices are the indices in a "flattened" batch.
         # dividing it to num_draft_tokens will yield the actual batch index.
         temperatures = temperatures[accepted_indices // num_draft_tokens]
-        if …:
+        if SGLANG_RETURN_ORIGINAL_LOGPROB:
             logprobs = torch.nn.functional.log_softmax(
                 logits_output.next_token_logits, dim=-1
             )
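The logprob branch now keys off the module-level constant `SGLANG_RETURN_ORIGINAL_LOGPROB` (the old condition was lost in this rendering). A sketch of the pattern, assuming an environment flag read once at import; the parsing, default, and the temperature-scaled "processed" path below are assumptions, only the constant's name comes from the hunk above:

```python
import os

import torch
import torch.nn.functional as F

# Read once at import time; parsing and default are assumptions.
SGLANG_RETURN_ORIGINAL_LOGPROB = os.environ.get(
    "SGLANG_RETURN_ORIGINAL_LOGPROB", "false"
).lower() in ("1", "true")


def accepted_token_logprobs(
    next_token_logits: torch.Tensor, temperatures: torch.Tensor
) -> torch.Tensor:
    """Log-softmax over raw logits when the flag is set, otherwise over
    temperature-scaled logits (a plausible processed path, assumed here)."""
    if SGLANG_RETURN_ORIGINAL_LOGPROB:
        return F.log_softmax(next_token_logits, dim=-1)
    return F.log_softmax(next_token_logits / temperatures, dim=-1)
```

Reading the flag once at import time avoids a per-batch environment lookup on the hot verification path.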
@@ -1003,7 +822,8 @@ class EAGLEWorker(TpModelWorker):
         )
         forward_batch.return_logprob = False
         logits_output, _ = self.draft_model_runner.forward(forward_batch)
-        self._detect_nan_if_needed(logits_output)
+        if self.enable_nan_detection:
+            detect_nan(logits_output)
         assert isinstance(forward_batch.spec_info, EagleDraftInput)
         assert forward_batch.spec_info is batch.spec_info
         self.capture_for_decode(logits_output, forward_batch.spec_info)
@@ -1098,7 +918,8 @@ class EAGLEWorker(TpModelWorker):
             )
             self.capture_for_decode(logits_output, forward_batch.spec_info)

-        self._detect_nan_if_needed(logits_output)
+        if self.enable_nan_detection:
+            detect_nan(logits_output)

         # Restore backup.
         # This is because `seq_lens` can be modified in `prepare_extend_after_decode`
@@ -1118,24 +939,6 @@ class EAGLEWorker(TpModelWorker):
         draft_input.topk_p, draft_input.topk_index = fast_topk(probs, self.topk, dim=-1)
         draft_input.hidden_states = logits_output.hidden_states

-    def _detect_nan_if_needed(self, logits_output: LogitsProcessorOutput):
-        if self.enable_nan_detection:
-            logits = logits_output.next_token_logits
-            if torch.any(torch.isnan(logits)):
-                logger.error("Detected errors during sampling! NaN in the logits.")
-                raise ValueError("Detected errors during sampling! NaN in the logits.")
-
-
-def load_token_map(token_map_path: str) -> List[int]:
-    if not os.path.exists(token_map_path):
-        cache_dir = snapshot_download(
-            os.path.dirname(token_map_path),
-            ignore_patterns=["*.bin", "*.safetensors"],
-        )
-        token_map_path = os.path.join(cache_dir, os.path.basename(token_map_path))
-    hot_token_id = torch.load(token_map_path, weights_only=True)
-    return torch.tensor(hot_token_id, dtype=torch.int64)
-

 @torch.compile(dynamic=True)
 def get_last_loc_large_page_size_top_k_1(
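`load_token_map` is removed from this file (presumably relocated; this diff alone does not show where). The `hot_token_id` tensor it produced remaps the draft model's top-k indices from a reduced "hot" vocabulary back to full-vocabulary ids, as in `topk_index = self.hot_token_id[topk_index]` in an earlier hunk. A toy illustration of that remapping:

```python
import torch

# Toy hot-token map: position = reduced-vocab id, value = full-vocab id.
hot_token_id = torch.tensor([7, 42, 99, 512], dtype=torch.int64)

# Draft top-k indices live in the reduced vocab space.
topk_index = torch.tensor([[0, 3], [2, 1]])

# Advanced indexing remaps them to full-vocabulary token ids.
print(hot_token_id[topk_index])  # tensor([[  7, 512], [ 99,  42]])
```

Keeping the draft head on a reduced vocabulary shrinks its output projection; the map restores real token ids before verification against the target model.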