sglang 0.5.3rc2__py3-none-any.whl → 0.5.4.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
 - sglang/bench_one_batch.py +47 -28
 - sglang/bench_one_batch_server.py +41 -25
 - sglang/bench_serving.py +378 -160
 - sglang/check_env.py +1 -1
 - sglang/compile_deep_gemm.py +6 -2
 - sglang/global_config.py +1 -25
 - sglang/lang/api.py +6 -0
 - sglang/lang/interpreter.py +1 -0
 - sglang/lang/ir.py +13 -0
 - sglang/launch_server.py +10 -15
 - sglang/profiler.py +18 -1
 - sglang/srt/_custom_ops.py +1 -1
 - sglang/srt/batch_invariant_ops/batch_invariant_ops.py +105 -10
 - sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
 - sglang/srt/compilation/backend.py +437 -0
 - sglang/srt/compilation/compilation_config.py +20 -0
 - sglang/srt/compilation/compilation_counter.py +47 -0
 - sglang/srt/compilation/compile.py +210 -0
 - sglang/srt/compilation/compiler_interface.py +503 -0
 - sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
 - sglang/srt/compilation/fix_functionalization.py +134 -0
 - sglang/srt/compilation/fx_utils.py +83 -0
 - sglang/srt/compilation/inductor_pass.py +140 -0
 - sglang/srt/compilation/pass_manager.py +66 -0
 - sglang/srt/compilation/piecewise_context_manager.py +40 -0
 - sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
 - sglang/srt/configs/__init__.py +4 -0
 - sglang/srt/configs/deepseek_ocr.py +262 -0
 - sglang/srt/configs/deepseekvl2.py +194 -96
 - sglang/srt/configs/dots_vlm.py +2 -7
 - sglang/srt/configs/falcon_h1.py +13 -64
 - sglang/srt/configs/load_config.py +25 -2
 - sglang/srt/configs/mamba_utils.py +117 -0
 - sglang/srt/configs/model_config.py +136 -25
 - sglang/srt/configs/modelopt_config.py +30 -0
 - sglang/srt/configs/nemotron_h.py +286 -0
 - sglang/srt/configs/olmo3.py +105 -0
 - sglang/srt/configs/points_v15_chat.py +29 -0
 - sglang/srt/configs/qwen3_next.py +11 -47
 - sglang/srt/configs/qwen3_omni.py +613 -0
 - sglang/srt/configs/qwen3_vl.py +0 -10
 - sglang/srt/connector/remote_instance.py +1 -1
 - sglang/srt/constrained/base_grammar_backend.py +5 -1
 - sglang/srt/constrained/llguidance_backend.py +5 -0
 - sglang/srt/constrained/outlines_backend.py +1 -1
 - sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
 - sglang/srt/constrained/utils.py +12 -0
 - sglang/srt/constrained/xgrammar_backend.py +20 -11
 - sglang/srt/disaggregation/ascend/transfer_engine.py +1 -1
 - sglang/srt/disaggregation/base/conn.py +17 -4
 - sglang/srt/disaggregation/common/conn.py +4 -2
 - sglang/srt/disaggregation/decode.py +123 -31
 - sglang/srt/disaggregation/decode_kvcache_offload_manager.py +1 -1
 - sglang/srt/disaggregation/fake/conn.py +11 -3
 - sglang/srt/disaggregation/mooncake/conn.py +157 -19
 - sglang/srt/disaggregation/nixl/conn.py +69 -24
 - sglang/srt/disaggregation/prefill.py +96 -270
 - sglang/srt/distributed/device_communicators/all_reduce_utils.py +4 -4
 - sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
 - sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
 - sglang/srt/distributed/device_communicators/pynccl.py +24 -12
 - sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
 - sglang/srt/distributed/device_communicators/symm_mem.py +1 -1
 - sglang/srt/distributed/naive_distributed.py +5 -4
 - sglang/srt/distributed/parallel_state.py +63 -19
 - sglang/srt/elastic_ep/elastic_ep.py +74 -0
 - sglang/srt/entrypoints/context.py +3 -2
 - sglang/srt/entrypoints/engine.py +83 -80
 - sglang/srt/entrypoints/grpc_server.py +430 -234
 - sglang/srt/entrypoints/harmony_utils.py +2 -2
 - sglang/srt/entrypoints/http_server.py +195 -102
 - sglang/srt/entrypoints/http_server_engine.py +1 -7
 - sglang/srt/entrypoints/openai/protocol.py +225 -37
 - sglang/srt/entrypoints/openai/serving_base.py +49 -2
 - sglang/srt/entrypoints/openai/serving_chat.py +29 -74
 - sglang/srt/entrypoints/openai/serving_classify.py +204 -0
 - sglang/srt/entrypoints/openai/serving_completions.py +15 -1
 - sglang/srt/entrypoints/openai/serving_responses.py +5 -2
 - sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
 - sglang/srt/environ.py +58 -6
 - sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
 - sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
 - sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
 - sglang/srt/eplb/expert_distribution.py +33 -4
 - sglang/srt/eplb/expert_location_dispatch.py +2 -2
 - sglang/srt/eplb/expert_location_updater.py +2 -2
 - sglang/srt/function_call/base_format_detector.py +17 -18
 - sglang/srt/function_call/function_call_parser.py +20 -14
 - sglang/srt/function_call/glm4_moe_detector.py +1 -5
 - sglang/srt/function_call/gpt_oss_detector.py +1 -1
 - sglang/srt/function_call/json_array_parser.py +0 -2
 - sglang/srt/function_call/minimax_m2.py +367 -0
 - sglang/srt/function_call/utils.py +2 -2
 - sglang/srt/grpc/compile_proto.py +3 -3
 - sglang/srt/{entrypoints → grpc}/grpc_request_manager.py +112 -52
 - sglang/srt/grpc/health_servicer.py +189 -0
 - sglang/srt/grpc/scheduler_launcher.py +181 -0
 - sglang/srt/grpc/sglang_scheduler_pb2.py +78 -70
 - sglang/srt/grpc/sglang_scheduler_pb2.pyi +66 -10
 - sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +89 -1
 - sglang/srt/layers/activation.py +10 -1
 - sglang/srt/layers/attention/aiter_backend.py +3 -3
 - sglang/srt/layers/attention/ascend_backend.py +17 -1
 - sglang/srt/layers/attention/attention_registry.py +43 -23
 - sglang/srt/layers/attention/base_attn_backend.py +20 -1
 - sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
 - sglang/srt/layers/attention/fla/chunk.py +0 -1
 - sglang/srt/layers/attention/fla/chunk_o.py +1 -1
 - sglang/srt/layers/attention/fla/index.py +0 -2
 - sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
 - sglang/srt/layers/attention/fla/utils.py +0 -3
 - sglang/srt/layers/attention/fla/wy_fast.py +0 -2
 - sglang/srt/layers/attention/flashattention_backend.py +24 -10
 - sglang/srt/layers/attention/flashinfer_backend.py +258 -22
 - sglang/srt/layers/attention/flashinfer_mla_backend.py +38 -28
 - sglang/srt/layers/attention/flashmla_backend.py +2 -2
 - sglang/srt/layers/attention/hybrid_attn_backend.py +1 -1
 - sglang/srt/layers/attention/hybrid_linear_attn_backend.py +165 -62
 - sglang/srt/layers/attention/intel_amx_backend.py +1 -1
 - sglang/srt/layers/attention/mamba/causal_conv1d.py +1 -1
 - sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +9 -5
 - sglang/srt/layers/attention/mamba/mamba.py +189 -241
 - sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
 - sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
 - sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +0 -50
 - sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +0 -60
 - sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +0 -111
 - sglang/srt/layers/attention/mamba/ops/ssd_combined.py +0 -1
 - sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +0 -11
 - sglang/srt/layers/attention/npu_ops/mla_preprocess.py +1 -1
 - sglang/srt/layers/attention/nsa/nsa_indexer.py +40 -83
 - sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
 - sglang/srt/layers/attention/nsa/utils.py +0 -1
 - sglang/srt/layers/attention/nsa_backend.py +404 -90
 - sglang/srt/layers/attention/triton_backend.py +208 -34
 - sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
 - sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
 - sglang/srt/layers/attention/trtllm_mha_backend.py +2 -2
 - sglang/srt/layers/attention/trtllm_mla_backend.py +362 -43
 - sglang/srt/layers/attention/utils.py +89 -7
 - sglang/srt/layers/attention/vision.py +3 -3
 - sglang/srt/layers/attention/xpu_backend.py +1028 -0
 - sglang/srt/layers/communicator.py +12 -7
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +5 -9
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/configurer.py +4 -3
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
 - sglang/srt/layers/dp_attention.py +17 -0
 - sglang/srt/layers/layernorm.py +64 -19
 - sglang/srt/layers/linear.py +9 -1
 - sglang/srt/layers/logits_processor.py +152 -17
 - sglang/srt/layers/modelopt_utils.py +11 -0
 - sglang/srt/layers/moe/cutlass_moe.py +0 -2
 - sglang/srt/layers/moe/cutlass_w4a8_moe.py +351 -21
 - sglang/srt/layers/moe/ep_moe/kernels.py +229 -457
 - sglang/srt/layers/moe/ep_moe/layer.py +154 -625
 - sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +1 -1
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +11 -3
 - sglang/srt/layers/moe/fused_moe_triton/layer.py +79 -73
 - sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +25 -46
 - sglang/srt/layers/moe/moe_runner/deep_gemm.py +569 -0
 - sglang/srt/layers/moe/moe_runner/runner.py +6 -0
 - sglang/srt/layers/moe/moe_runner/triton.py +3 -1
 - sglang/srt/layers/moe/moe_runner/triton_kernels.py +194 -0
 - sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
 - sglang/srt/layers/moe/router.py +51 -15
 - sglang/srt/layers/moe/token_dispatcher/__init__.py +14 -4
 - sglang/srt/layers/moe/token_dispatcher/base.py +12 -6
 - sglang/srt/layers/moe/token_dispatcher/deepep.py +127 -110
 - sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
 - sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
 - sglang/srt/layers/moe/topk.py +7 -6
 - sglang/srt/layers/moe/utils.py +20 -5
 - sglang/srt/layers/quantization/__init__.py +5 -58
 - sglang/srt/layers/quantization/awq.py +183 -9
 - sglang/srt/layers/quantization/awq_triton.py +29 -0
 - sglang/srt/layers/quantization/base_config.py +27 -1
 - sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
 - sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +20 -49
 - sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
 - sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +3 -0
 - sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
 - sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
 - sglang/srt/layers/quantization/fp8.py +152 -81
 - sglang/srt/layers/quantization/fp8_kernel.py +55 -10
 - sglang/srt/layers/quantization/fp8_utils.py +42 -14
 - sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
 - sglang/srt/layers/quantization/gguf.py +566 -0
 - sglang/srt/layers/quantization/gptq.py +0 -1
 - sglang/srt/layers/quantization/int8_kernel.py +18 -2
 - sglang/srt/layers/quantization/marlin_utils.py +12 -0
 - sglang/srt/layers/quantization/modelopt_quant.py +125 -100
 - sglang/srt/layers/quantization/mxfp4.py +35 -68
 - sglang/srt/layers/quantization/petit.py +1 -1
 - sglang/srt/layers/quantization/quark/quark.py +3 -1
 - sglang/srt/layers/quantization/quark/quark_moe.py +3 -3
 - sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
 - sglang/srt/layers/quantization/unquant.py +23 -48
 - sglang/srt/layers/quantization/utils.py +0 -1
 - sglang/srt/layers/quantization/w4afp8.py +87 -20
 - sglang/srt/layers/quantization/w8a8_int8.py +30 -24
 - sglang/srt/layers/radix_attention.py +62 -9
 - sglang/srt/layers/rotary_embedding.py +686 -17
 - sglang/srt/layers/sampler.py +47 -16
 - sglang/srt/layers/sparse_pooler.py +98 -0
 - sglang/srt/layers/utils.py +0 -1
 - sglang/srt/layers/vocab_parallel_embedding.py +4 -1
 - sglang/srt/lora/backend/triton_backend.py +0 -1
 - sglang/srt/lora/eviction_policy.py +139 -0
 - sglang/srt/lora/lora_manager.py +24 -9
 - sglang/srt/lora/lora_registry.py +1 -1
 - sglang/srt/lora/mem_pool.py +40 -16
 - sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +1 -1
 - sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +4 -2
 - sglang/srt/managers/cache_controller.py +48 -17
 - sglang/srt/managers/data_parallel_controller.py +146 -42
 - sglang/srt/managers/detokenizer_manager.py +40 -13
 - sglang/srt/managers/io_struct.py +69 -16
 - sglang/srt/managers/mm_utils.py +20 -18
 - sglang/srt/managers/multi_tokenizer_mixin.py +83 -82
 - sglang/srt/managers/overlap_utils.py +96 -19
 - sglang/srt/managers/schedule_batch.py +241 -511
 - sglang/srt/managers/schedule_policy.py +15 -2
 - sglang/srt/managers/scheduler.py +420 -514
 - sglang/srt/managers/scheduler_metrics_mixin.py +73 -18
 - sglang/srt/managers/scheduler_output_processor_mixin.py +317 -111
 - sglang/srt/managers/scheduler_pp_mixin.py +341 -0
 - sglang/srt/managers/scheduler_profiler_mixin.py +60 -14
 - sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
 - sglang/srt/managers/scheduler_update_weights_mixin.py +33 -14
 - sglang/srt/managers/tokenizer_communicator_mixin.py +71 -55
 - sglang/srt/managers/tokenizer_manager.py +375 -95
 - sglang/srt/managers/tp_worker.py +212 -161
 - sglang/srt/managers/utils.py +78 -2
 - sglang/srt/mem_cache/allocator.py +7 -2
 - sglang/srt/mem_cache/allocator_ascend.py +2 -2
 - sglang/srt/mem_cache/base_prefix_cache.py +2 -2
 - sglang/srt/mem_cache/chunk_cache.py +13 -2
 - sglang/srt/mem_cache/common.py +480 -0
 - sglang/srt/mem_cache/evict_policy.py +16 -1
 - sglang/srt/mem_cache/hicache_storage.py +11 -2
 - sglang/srt/mem_cache/hiradix_cache.py +16 -3
 - sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
 - sglang/srt/mem_cache/memory_pool.py +517 -219
 - sglang/srt/mem_cache/memory_pool_host.py +0 -1
 - sglang/srt/mem_cache/multimodal_cache.py +0 -1
 - sglang/srt/mem_cache/radix_cache.py +53 -19
 - sglang/srt/mem_cache/radix_cache_cpp.py +19 -14
 - sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +8 -2
 - sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +1 -13
 - sglang/srt/mem_cache/storage/backend_factory.py +2 -2
 - sglang/srt/mem_cache/storage/eic/eic_storage.py +5 -6
 - sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
 - sglang/srt/mem_cache/storage/hf3fs/mini_3fs_metadata_server.py +3 -2
 - sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +9 -3
 - sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +5 -3
 - sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +101 -17
 - sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
 - sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
 - sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
 - sglang/srt/mem_cache/swa_radix_cache.py +92 -26
 - sglang/srt/metrics/collector.py +31 -0
 - sglang/srt/metrics/func_timer.py +1 -1
 - sglang/srt/model_executor/cuda_graph_runner.py +43 -5
 - sglang/srt/model_executor/forward_batch_info.py +71 -25
 - sglang/srt/model_executor/model_runner.py +362 -270
 - sglang/srt/model_executor/npu_graph_runner.py +2 -3
 - sglang/srt/model_executor/piecewise_cuda_graph_runner.py +549 -0
 - sglang/srt/model_loader/__init__.py +1 -1
 - sglang/srt/model_loader/loader.py +424 -27
 - sglang/srt/model_loader/utils.py +0 -1
 - sglang/srt/model_loader/weight_utils.py +47 -28
 - sglang/srt/models/apertus.py +2 -3
 - sglang/srt/models/arcee.py +2 -2
 - sglang/srt/models/bailing_moe.py +13 -52
 - sglang/srt/models/bailing_moe_nextn.py +3 -4
 - sglang/srt/models/bert.py +1 -1
 - sglang/srt/models/deepseek_nextn.py +19 -3
 - sglang/srt/models/deepseek_ocr.py +1516 -0
 - sglang/srt/models/deepseek_v2.py +418 -140
 - sglang/srt/models/dots_ocr.py +0 -2
 - sglang/srt/models/dots_vlm.py +0 -1
 - sglang/srt/models/dots_vlm_vit.py +1 -1
 - sglang/srt/models/falcon_h1.py +13 -19
 - sglang/srt/models/gemma3_mm.py +16 -0
 - sglang/srt/models/gemma3n_mm.py +1 -2
 - sglang/srt/models/glm4_moe.py +327 -382
 - sglang/srt/models/glm4_moe_nextn.py +6 -16
 - sglang/srt/models/glm4v.py +2 -1
 - sglang/srt/models/glm4v_moe.py +32 -199
 - sglang/srt/models/gpt_oss.py +5 -5
 - sglang/srt/models/grok.py +10 -23
 - sglang/srt/models/hunyuan.py +2 -7
 - sglang/srt/models/interns1.py +0 -1
 - sglang/srt/models/kimi_vl.py +1 -7
 - sglang/srt/models/kimi_vl_moonvit.py +3 -1
 - sglang/srt/models/llama.py +2 -2
 - sglang/srt/models/llama_eagle3.py +1 -1
 - sglang/srt/models/longcat_flash.py +5 -22
 - sglang/srt/models/longcat_flash_nextn.py +3 -14
 - sglang/srt/models/mimo.py +2 -13
 - sglang/srt/models/mimo_mtp.py +1 -2
 - sglang/srt/models/minicpmo.py +7 -5
 - sglang/srt/models/minimax_m2.py +922 -0
 - sglang/srt/models/mixtral.py +1 -4
 - sglang/srt/models/mllama.py +1 -1
 - sglang/srt/models/mllama4.py +13 -3
 - sglang/srt/models/nemotron_h.py +511 -0
 - sglang/srt/models/nvila.py +355 -0
 - sglang/srt/models/nvila_lite.py +184 -0
 - sglang/srt/models/olmo2.py +31 -4
 - sglang/srt/models/opt.py +5 -5
 - sglang/srt/models/phi.py +1 -1
 - sglang/srt/models/phi4mm.py +1 -1
 - sglang/srt/models/phimoe.py +0 -1
 - sglang/srt/models/pixtral.py +0 -3
 - sglang/srt/models/points_v15_chat.py +186 -0
 - sglang/srt/models/qwen.py +0 -1
 - sglang/srt/models/qwen2.py +22 -1
 - sglang/srt/models/qwen2_5_vl.py +3 -3
 - sglang/srt/models/qwen2_audio.py +2 -15
 - sglang/srt/models/qwen2_moe.py +15 -12
 - sglang/srt/models/qwen2_vl.py +5 -2
 - sglang/srt/models/qwen3.py +34 -4
 - sglang/srt/models/qwen3_moe.py +19 -37
 - sglang/srt/models/qwen3_next.py +7 -12
 - sglang/srt/models/qwen3_next_mtp.py +3 -4
 - sglang/srt/models/qwen3_omni_moe.py +661 -0
 - sglang/srt/models/qwen3_vl.py +37 -33
 - sglang/srt/models/qwen3_vl_moe.py +57 -185
 - sglang/srt/models/roberta.py +55 -3
 - sglang/srt/models/sarashina2_vision.py +0 -1
 - sglang/srt/models/step3_vl.py +3 -5
 - sglang/srt/models/utils.py +11 -1
 - sglang/srt/multimodal/processors/base_processor.py +7 -2
 - sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
 - sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
 - sglang/srt/multimodal/processors/dots_vlm.py +0 -1
 - sglang/srt/multimodal/processors/glm4v.py +2 -6
 - sglang/srt/multimodal/processors/internvl.py +0 -2
 - sglang/srt/multimodal/processors/janus_pro.py +0 -1
 - sglang/srt/multimodal/processors/mllama4.py +0 -8
 - sglang/srt/multimodal/processors/{vila.py → nvila.py} +32 -24
 - sglang/srt/multimodal/processors/phi4mm.py +0 -1
 - sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
 - sglang/srt/multimodal/processors/qwen_vl.py +75 -16
 - sglang/srt/multimodal/processors/step3_vl.py +1 -1
 - sglang/srt/parser/conversation.py +41 -0
 - sglang/srt/parser/reasoning_parser.py +28 -2
 - sglang/srt/sampling/custom_logit_processor.py +77 -2
 - sglang/srt/sampling/sampling_batch_info.py +17 -22
 - sglang/srt/sampling/sampling_params.py +70 -2
 - sglang/srt/server_args.py +846 -163
 - sglang/srt/server_args_config_parser.py +1 -1
 - sglang/srt/single_batch_overlap.py +36 -31
 - sglang/srt/speculative/base_spec_worker.py +34 -0
 - sglang/srt/speculative/draft_utils.py +226 -0
 - sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +24 -7
 - sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +23 -2
 - sglang/srt/speculative/eagle_info.py +57 -18
 - sglang/srt/speculative/eagle_info_v2.py +458 -0
 - sglang/srt/speculative/eagle_utils.py +138 -0
 - sglang/srt/speculative/eagle_worker.py +83 -280
 - sglang/srt/speculative/eagle_worker_v2.py +702 -0
 - sglang/srt/speculative/{ngram_utils.py → ngram_info.py} +14 -9
 - sglang/srt/speculative/ngram_worker.py +12 -11
 - sglang/srt/speculative/spec_info.py +2 -0
 - sglang/srt/speculative/spec_utils.py +38 -3
 - sglang/srt/speculative/standalone_worker.py +4 -14
 - sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
 - sglang/srt/two_batch_overlap.py +28 -14
 - sglang/srt/utils/__init__.py +1 -1
 - sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
 - sglang/srt/utils/common.py +272 -82
 - sglang/srt/utils/hf_transformers_utils.py +44 -17
 - sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
 - sglang/srt/{offloader.py → utils/offloader.py} +4 -4
 - sglang/srt/utils/profile_merger.py +199 -0
 - sglang/test/attention/test_flashattn_backend.py +1 -1
 - sglang/test/attention/test_flashattn_mla_backend.py +0 -1
 - sglang/test/attention/test_prefix_chunk_info.py +0 -2
 - sglang/test/attention/test_trtllm_mla_backend.py +221 -53
 - sglang/test/few_shot_gsm8k_engine.py +2 -4
 - sglang/test/kit_matched_stop.py +157 -0
 - sglang/test/longbench_v2/__init__.py +1 -0
 - sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
 - sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
 - sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
 - sglang/test/run_eval.py +41 -0
 - sglang/test/runners.py +2 -0
 - sglang/test/send_one.py +42 -7
 - sglang/test/simple_eval_common.py +3 -0
 - sglang/test/simple_eval_gpqa.py +0 -1
 - sglang/test/simple_eval_humaneval.py +0 -3
 - sglang/test/simple_eval_longbench_v2.py +344 -0
 - sglang/test/test_block_fp8.py +1 -2
 - sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
 - sglang/test/test_cutlass_moe.py +1 -2
 - sglang/test/test_cutlass_w4a8_moe.py +10 -20
 - sglang/test/test_deterministic.py +463 -107
 - sglang/test/test_deterministic_utils.py +74 -0
 - sglang/test/test_disaggregation_utils.py +81 -0
 - sglang/test/test_marlin_moe.py +0 -1
 - sglang/test/test_utils.py +85 -20
 - sglang/version.py +1 -1
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/METADATA +48 -35
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/RECORD +414 -350
 - sglang/srt/layers/attention/mamba/mamba_utils.py +0 -81
 - sglang/srt/managers/tp_worker_overlap_thread.py +0 -311
 - sglang/srt/models/vila.py +0 -306
 - sglang/srt/speculative/build_eagle_tree.py +0 -427
 - sglang/test/test_block_fp8_ep.py +0 -358
 - /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
 - /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
 - /sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +0 -0
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/WHEEL +0 -0
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/licenses/LICENSE +0 -0
 - {sglang-0.5.3rc2.dist-info → sglang-0.5.4.post1.dist-info}/top_level.txt +0 -0
 
sglang/srt/entrypoints/harmony_utils.py (+2 -2)

@@ -3,10 +3,10 @@
 # Adapted from vLLM: https://github.com/vllm-project/vllm/blob/1b9902806915040ac9b3029f2ab7522ec505afc3/vllm/entrypoints/harmony_utils.py
 # Slight differences in processing chat messages
 import datetime
-import json
 from collections.abc import Iterable
 from typing import Literal, Optional, Union
 
+import orjson
 from openai.types.responses import (
     ResponseOutputItem,
     ResponseOutputMessage,

@@ -228,7 +228,7 @@ def parse_output_message(message: Message):
         if len(message.content) != 1:
             raise ValueError("Invalid number of contents in browser message")
         content = message.content[0]
-        browser_call = json.loads(content.text)
+        browser_call = orjson.loads(content.text)
         # TODO: translate to url properly!
         if recipient == "browser.search":
             action = ActionSearch(
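Both hunks swap the stdlib json module for orjson. At these call sites the swap is drop-in; a minimal sketch (assuming orjson is installed) of the behavior it relies on:

import json

import orjson

payload = '{"recipient": "browser.search", "topn": 3}'

# orjson.loads accepts str or bytes and returns the same structures as json.loads.
assert orjson.loads(payload) == json.loads(payload)
assert orjson.loads(payload.encode()) == json.loads(payload)

# The visible difference is on the encode side: orjson.dumps returns bytes, not str.
assert orjson.dumps({"a": 1}) == b'{"a":1}'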
sglang/srt/entrypoints/http_server.py (+195 -102)

@@ -19,9 +19,8 @@ This file implements HTTP APIs for the inference engine via fastapi.
 
 import asyncio
 import dataclasses
-import json
 import logging
-import multiprocessing 
+import multiprocessing
 import os
 import tempfile
 import threading
@@ -51,20 +50,28 @@ from sglang.srt.disaggregation.utils import FAKE_BOOTSTRAP_HOST, DisaggregationM
 from sglang.srt.entrypoints.engine import _launch_subprocesses
 from sglang.srt.entrypoints.openai.protocol import (
     ChatCompletionRequest,
+    ClassifyRequest,
     CompletionRequest,
+    DetokenizeRequest,
     EmbeddingRequest,
     ErrorResponse,
     ModelCard,
     ModelList,
     ResponsesRequest,
     ScoringRequest,
+    TokenizeRequest,
     V1RerankReqInput,
 )
 from sglang.srt.entrypoints.openai.serving_chat import OpenAIServingChat
+from sglang.srt.entrypoints.openai.serving_classify import OpenAIServingClassify
 from sglang.srt.entrypoints.openai.serving_completions import OpenAIServingCompletion
 from sglang.srt.entrypoints.openai.serving_embedding import OpenAIServingEmbedding
 from sglang.srt.entrypoints.openai.serving_rerank import OpenAIServingRerank
 from sglang.srt.entrypoints.openai.serving_score import OpenAIServingScore
+from sglang.srt.entrypoints.openai.serving_tokenize import (
+    OpenAIServingDetokenize,
+    OpenAIServingTokenize,
+)
 from sglang.srt.function_call.function_call_parser import FunctionCallParser
 from sglang.srt.managers.io_struct import (
     AbortReq,
@@ -89,6 +96,7 @@ from sglang.srt.managers.io_struct import (
     UnloadLoRAAdapterReqInput,
     UpdateWeightFromDiskReqInput,
     UpdateWeightsFromDistributedReqInput,
+    UpdateWeightsFromIPCReqInput,
     UpdateWeightsFromTensorReqInput,
     UpdateWeightVersionReqInput,
     VertexGenerateReqInput,
@@ -122,6 +130,7 @@ logger = logging.getLogger(__name__)
 asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
 
 HEALTH_CHECK_TIMEOUT = int(os.getenv("SGLANG_HEALTH_CHECK_TIMEOUT", 20))
+WAIT_WEIGHTS_READY_TIMEOUT = int(os.getenv("SGLANG_WAIT_WEIGHTS_READY_TIMEOUT", 120))
 
 
 # Store global states
@@ -142,24 +151,28 @@ def set_global_state(global_state: _GlobalState):
 
 async def init_multi_tokenizer() -> ServerArgs:
     """Read args information from shm and init tokenizer manager for current process"""
-    pid = os.getpid()
-    main_pid = get_main_process_id()
-    logger.info(f"current worker_id: {pid}, main processID: {main_pid}")
 
     # Read configuration from shared memory
+    main_pid = get_main_process_id()
     port_args, server_args, scheduler_info = read_from_shared_memory(
         f"multi_tokenizer_args_{main_pid}"
     )
     server_args: ServerArgs
+    port_args: PortArgs
 
     # API key authentication is not supported in multi-tokenizer mode
     assert (
         server_args.api_key is None
     ), "API key is not supported in multi-tokenizer mode"
 
+    # Create a new ipc name for the current process
     port_args.tokenizer_ipc_name = (
         f"ipc://{tempfile.NamedTemporaryFile(delete=False).name}"
     )
+    logger.info(
+        f"Start multi-tokenizer worker process {os.getpid()}, "
+        f"ipc_name={port_args.tokenizer_ipc_name}"
+    )
 
     # Launch multi-tokenizer manager process
     tokenizer_manager = TokenizerWorker(server_args, port_args)
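For context, read_from_shared_memory is sglang's own helper; a self-contained sketch of the underlying pattern (illustrative only, not the actual implementation) is a named multiprocessing SharedMemory block keyed by the main process id, written once by the parent and attached by each tokenizer worker:

import pickle
from multiprocessing import shared_memory

def write_args(name: str, obj) -> None:
    # Parent: pickle the args into a named shared-memory block.
    data = pickle.dumps(obj)
    shm = shared_memory.SharedMemory(name=name, create=True, size=len(data))
    shm.buf[: len(data)] = data
    shm.close()

def read_args(name: str):
    # Worker: attach by name and unpickle (pickle ignores trailing padding).
    shm = shared_memory.SharedMemory(name=name)
    obj = pickle.loads(bytes(shm.buf))
    shm.close()
    shm.unlink()  # free the OS resources once the args have been read
    return obj

write_args("multi_tokenizer_args_1234", ("port_args", "server_args", {"max_req_input_len": 4096}))
print(read_args("multi_tokenizer_args_1234"))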
@@ -170,10 +183,9 @@ async def init_multi_tokenizer() -> ServerArgs:
         chat_template=server_args.chat_template,
         completion_template=server_args.completion_template,
     )
-    # Register this tokenizer with the main tokenizer manager
-    await tokenizer_manager.register_to_main_tokenizer_manager()
 
     tokenizer_manager.max_req_input_len = scheduler_info["max_req_input_len"]
+
     set_global_state(
         _GlobalState(
             tokenizer_manager=tokenizer_manager,
@@ -182,36 +194,35 @@ async def init_multi_tokenizer() -> ServerArgs:
         )
     )
 
-    if server_args.enable_trace:
-        process_tracing_init(server_args.oltp_traces_endpoint, "sglang")
-        if server_args.disaggregation_mode == "null":
-            thread_label = f"MultiTokenizer-{tokenizer_manager.worker_id}"
-            trace_set_thread_info(thread_label)
-
     return server_args
 
 
 @asynccontextmanager
 async def lifespan(fast_api_app: FastAPI):
-    if …
+    if getattr(fast_api_app, "is_single_tokenizer_mode", False):
+        server_args = fast_api_app.server_args
+        warmup_thread_args = fast_api_app.warmup_thread_args
+        thread_label = "Tokenizer"
+    else:
         # Initialize multi-tokenizer support for worker processes
-        …
-            add_prometheus_middleware(app)
-            enable_func_timer()
-
-        logger.info(f"Worker {worker_pid} added prometheus middleware")
-        fast_api_app.warmup_thread = threading.Thread(
-            target=_wait_and_warmup,
-            args=(
-                fast_api_app.server_args,
-                None,  # pipe_finish_writer not needed in worker
-                None,  # launch_callback not needed in worker
-            ),
+        server_args = await init_multi_tokenizer()
+        warmup_thread_args = (
+            server_args,
+            None,
+            None,
         )
+        thread_label = f"MultiTokenizer-{_global_state.tokenizer_manager.worker_id}"
+
+    # Add prometheus middleware
+    if server_args.enable_metrics:
+        add_prometheus_middleware(app)
+        enable_func_timer()
+
+    # Init tracing
+    if server_args.enable_trace:
+        process_tracing_init(server_args.oltp_traces_endpoint, "sglang")
+        if server_args.disaggregation_mode == "null":
+            trace_set_thread_info(thread_label)
 
     # Initialize OpenAI serving handlers
     fast_api_app.state.openai_serving_completion = OpenAIServingCompletion(
@@ -223,15 +234,23 @@ async def lifespan(fast_api_app: FastAPI):
     fast_api_app.state.openai_serving_embedding = OpenAIServingEmbedding(
         _global_state.tokenizer_manager, _global_state.template_manager
     )
+    fast_api_app.state.openai_serving_classify = OpenAIServingClassify(
+        _global_state.tokenizer_manager, _global_state.template_manager
+    )
     fast_api_app.state.openai_serving_score = OpenAIServingScore(
         _global_state.tokenizer_manager
     )
     fast_api_app.state.openai_serving_rerank = OpenAIServingRerank(
         _global_state.tokenizer_manager
     )
+    fast_api_app.state.openai_serving_tokenize = OpenAIServingTokenize(
+        _global_state.tokenizer_manager
+    )
+    fast_api_app.state.openai_serving_detokenize = OpenAIServingDetokenize(
+        _global_state.tokenizer_manager
+    )
 
-    …
-
+    # Launch tool server
     tool_server = None
     if server_args.tool_server == "demo":
         from sglang.srt.entrypoints.openai.tool_server import DemoToolServer
@@ -255,12 +274,11 @@ async def lifespan(fast_api_app: FastAPI):
             enable_force_include_usage=True,
             tool_server=tool_server,
         )
-    except Exception as e:
-        …
-        traceback.print_exc()
-        logger.warning(f"Can not initialize OpenAIServingResponses, error: {e}")
+    except Exception:
+        traceback = get_exception_traceback()
+        logger.warning(f"Can not initialize OpenAIServingResponses, error: {traceback}")
 
+    # Execute custom warmups
     if server_args.warmups is not None:
         await execute_warmups(
             server_args.disaggregation_mode,
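The rewritten except block logs the formatted traceback instead of printing it to stderr. get_exception_traceback is sglang's own helper; a stdlib-only sketch of the equivalent behavior (assumed, not taken from sglang):

import traceback

try:
    raise RuntimeError("tool server unavailable")
except Exception:
    tb = traceback.format_exc()  # the full traceback as a string
    print(f"Can not initialize OpenAIServingResponses, error: {tb}")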
@@ -269,18 +287,18 @@ async def lifespan(fast_api_app: FastAPI):
     )
     logger.info("Warmup ended")
 
-    …
+    # Execute the general warmup
+    warmup_thread = threading.Thread(
+        target=_wait_and_warmup,
+        args=warmup_thread_args,
+    )
+    warmup_thread.start()
 
+    # Start the HTTP server
     try:
         yield
     finally:
-        …
-        pid = os.getpid()
-        logger.info(f"uvicorn worker {pid} ending...")
-        warmup_thread.join()
-        logger.info(f"uvicorn worker {pid} ended.")
+        warmup_thread.join()
 
 
 # Fast API
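The warmup thread now starts inside the FastAPI lifespan for both the single- and multi-tokenizer paths and is joined on shutdown. A minimal, generic sketch of the pattern (not sglang's actual handler; _wait_and_warmup's body is elided):

import threading
from contextlib import asynccontextmanager

from fastapi import FastAPI

def _wait_and_warmup() -> None:
    pass  # e.g. poll the health endpoint, then send one warmup request

@asynccontextmanager
async def lifespan(app: FastAPI):
    warmup_thread = threading.Thread(target=_wait_and_warmup)
    warmup_thread.start()
    try:
        yield  # the HTTP server handles requests while suspended here
    finally:
        warmup_thread.join()  # clean shutdown instead of per-worker bookkeeping

app = FastAPI(lifespan=lifespan)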
@@ -480,6 +498,11 @@ async def get_server_info():
     internal_states: List[Dict[Any, Any]] = (
         await _global_state.tokenizer_manager.get_internal_state()
     )
+
+    # This field is not serializable.
+    if hasattr(_global_state.tokenizer_manager.server_args, "model_config"):
+        del _global_state.tokenizer_manager.server_args.model_config
+
     return {
         **dataclasses.asdict(_global_state.tokenizer_manager.server_args),
         **_global_state.scheduler_info,
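The deletion above guards the line that follows it: dataclasses.asdict copies every field of server_args into the response, and orjson (behind the endpoint's JSON response) raises on types it cannot encode. A small reproduction:

import dataclasses

import orjson

@dataclasses.dataclass
class ArgsLike:
    model_path: str
    model_config: object  # stands in for the non-serializable field

args = ArgsLike("some-model", object())
try:
    orjson.dumps(dataclasses.asdict(args))
except TypeError as exc:  # orjson's encode error subclasses TypeError
    print(f"not serializable: {exc}")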
@@ -494,7 +517,7 @@ async def get_load():
 
 
 # example usage:
-# curl -s -X POST http://localhost:30000/set_internal_state -H "Content-Type: application/json" -d '{"server_args": {"…
+# curl -s -X POST http://localhost:30000/set_internal_state -H "Content-Type: application/json" -d '{"server_args": {"pp_max_micro_batch_size": 8}}'
 @app.api_route("/set_internal_state", methods=["POST", "PUT"])
 async def set_internal_state(obj: SetInternalStateReq, request: Request):
     res = await _global_state.tokenizer_manager.set_internal_state(obj)
@@ -543,7 +566,7 @@ async def generate_request(obj: GenerateReqInput, request: Request):
 async def generate_from_file_request(file: UploadFile, request: Request):
     """Handle a generate request, this is purely to work with input_embeds."""
     content = await file.read()
-    input_embeds = json.loads(content.decode("utf-8"))
+    input_embeds = orjson.loads(content.decode("utf-8"))
 
     obj = GenerateReqInput(
         input_embeds=input_embeds,
@@ -622,6 +645,7 @@ async def start_profile_async(obj: Optional[ProfileReqInput] = None):
         with_stack=obj.with_stack,
         record_shapes=obj.record_shapes,
         profile_by_stage=obj.profile_by_stage,
+        merge_profiles=obj.merge_profiles,
     )
     return Response(
         content="Start profiling.\n",
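A hedged usage sketch for the new flag. merge_profiles comes from ProfileReqInput in this diff; the /start_profile route and the requests dependency are assumptions based on the handler name, not something this hunk shows:

import requests  # third-party: pip install requests

resp = requests.post(
    "http://localhost:30000/start_profile",
    json={"profile_by_stage": True, "merge_profiles": True},
)
print(resp.text)  # "Start profiling.\n"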
@@ -820,6 +844,27 @@ async def update_weights_from_distributed(
         return ORJSONResponse(content, status_code=HTTPStatus.BAD_REQUEST)
 
 
+@app.post("/update_weights_from_ipc")
+async def update_weights_from_ipc(obj: UpdateWeightsFromIPCReqInput, request: Request):
+    """Update the weights from IPC (Inter-Process Communication) for checkpoint-engine integration."""
+    success, message = await _global_state.tokenizer_manager.update_weights_from_ipc(
+        obj, request
+    )
+
+    # Update weight version if provided and weights update was successful
+    if success and obj.weight_version is not None:
+        _update_weight_version_if_provided(obj.weight_version)
+        message += f" Weight version updated to {obj.weight_version}."
+
+    content = {"success": success, "message": message}
+    if success:
+        if _global_state.tokenizer_manager.initial_weights_loaded is False:
+            _global_state.tokenizer_manager.initial_weights_loaded = True
+        return ORJSONResponse(content)
+    else:
+        return ORJSONResponse(content, status_code=HTTPStatus.BAD_REQUEST)
+
+
 @app.post("/update_weight_version")
 async def update_weight_version(obj: UpdateWeightVersionReqInput, request: Request):
     """Update the weight version. This operation requires no active requests."""
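A hedged usage sketch for the new endpoint. The full `UpdateWeightsFromIPCReqInput` schema is defined elsewhere in the package; `weight_version` is the one field the handler demonstrably reads, and the address is an assumption:

```python
import requests

resp = requests.post(
    "http://localhost:30000/update_weights_from_ipc",  # assumed address
    json={"weight_version": "v2"},  # other checkpoint-engine fields omitted
)
body = resp.json()
# On success: {"success": True, "message": "... Weight version updated to v2."}
# On failure the handler returns the same shape with HTTP 400.
print(body["success"], body["message"])
```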
@@ -1070,6 +1115,54 @@ async def openai_v1_embeddings(request: EmbeddingRequest, raw_request: Request):
     )
 
 
+@app.post(
+    "/v1/classify",
+    response_class=ORJSONResponse,
+    dependencies=[Depends(validate_json_request)],
+)
+async def openai_v1_classify(request: ClassifyRequest, raw_request: Request):
+    """OpenAI-compatible classification endpoint."""
+    return await raw_request.app.state.openai_serving_classify.handle_request(
+        request, raw_request
+    )
+
+
+@app.post(
+    "/v1/tokenize",
+    response_class=ORJSONResponse,
+    dependencies=[Depends(validate_json_request)],
+)
+@app.post(
+    "/tokenize",
+    response_class=ORJSONResponse,
+    dependencies=[Depends(validate_json_request)],
+    include_in_schema=False,
+)
+async def openai_v1_tokenize(request: TokenizeRequest, raw_request: Request):
+    """OpenAI-compatible tokenization endpoint."""
+    return await raw_request.app.state.openai_serving_tokenize.handle_request(
+        request, raw_request
+    )
+
+
+@app.post(
+    "/v1/detokenize",
+    response_class=ORJSONResponse,
+    dependencies=[Depends(validate_json_request)],
+)
+@app.post(
+    "/detokenize",
+    response_class=ORJSONResponse,
+    dependencies=[Depends(validate_json_request)],
+    include_in_schema=False,
+)
+async def openai_v1_detokenize(request: DetokenizeRequest, raw_request: Request):
+    """OpenAI-compatible detokenization endpoint."""
+    return await raw_request.app.state.openai_serving_detokenize.handle_request(
+        request, raw_request
+    )
+
+
 @app.get("/v1/models", response_class=ORJSONResponse)
 async def available_models():
     """Show available models. OpenAI-compatible endpoint."""
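A hedged sketch of calling the new OpenAI-compatible routes. The `TokenizeRequest`/`DetokenizeRequest` field names are not shown in this diff, so the payloads below are illustrative guesses; note that each route has an unversioned alias hidden from the schema via `include_in_schema=False`:

```python
import requests

BASE = "http://localhost:30000"  # assumed server address

# Payload fields ("model", "prompt", "tokens") are guesses for illustration.
tok = requests.post(f"{BASE}/v1/tokenize", json={"model": "default", "prompt": "Hello"})
print(tok.json())

detok = requests.post(f"{BASE}/v1/detokenize", json={"model": "default", "tokens": [1, 2, 3]})
print(detok.json())
```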
@@ -1239,27 +1332,12 @@ def launch_server(
     3. DetokenizerManager (subprocess): Detokenizes the output tokens and sends the result back to the Tokenizer Manager.
 
     Note:
-    1. The HTTP server, Engine, and TokenizerManager …
+    1. The HTTP server, Engine, and TokenizerManager all run in the main process.
     2. Inter-process communication is done through IPC (each process uses a different port) via the ZMQ library.
     """
-    …
-        …
-        …
-            f"ipc://{tempfile.NamedTemporaryFile(delete=False).name}"
-        )
-        tokenizer_manager, template_manager, scheduler_info = _launch_subprocesses(
-            server_args=server_args, port_args=port_args
-        )
-    else:
-        tokenizer_manager, template_manager, scheduler_info = _launch_subprocesses(
-            server_args=server_args,
-        )
-
-        if server_args.enable_trace:
-            process_tracing_init(server_args.oltp_traces_endpoint, "sglang")
-            if server_args.disaggregation_mode == "null":
-                thread_label = "Tokenizer"
-                trace_set_thread_info(thread_label)
+    tokenizer_manager, template_manager, scheduler_info, port_args = (
+        _launch_subprocesses(server_args=server_args)
+    )
 
     set_global_state(
         _GlobalState(
@@ -1269,40 +1347,45 @@ def launch_server(
         )
     )
 
-    …
-    …
-
+    # Pass additional arguments to the lifespan function.
+    # They will be used for additional initialization setups.
+    if server_args.tokenizer_worker_num == 1:
+        # If it is single tokenizer mode, we can pass the arguments by attributes of the app object.
+        app.is_single_tokenizer_mode = True
+        app.server_args = server_args
+        app.warmup_thread_args = (
             server_args,
-            …
+            pipe_finish_writer,
+            launch_callback,
         )
-
+
         # Add api key authorization
+        # This is only supported in single tokenizer mode.
         if server_args.api_key:
             add_api_key_middleware(app, server_args.api_key)
-
-        # …
-        …
-
-
-
-        # Send a warmup request - we will create the thread launch it
-        # in the lifespan after all other warmups have fired.
-        warmup_thread = threading.Thread(
-            target=_wait_and_warmup,
-            args=(
-                server_args,
-                pipe_finish_writer,
-                launch_callback,
-            ),
+    else:
+        # If it is multi-tokenizer mode, we need to write the arguments to shared memory
+        # for other worker processes to read.
+        app.is_single_tokenizer_mode = False
+        multi_tokenizer_args_shm = write_data_for_multi_tokenizer(
+            port_args, server_args, scheduler_info
         )
-        app.warmup_thread = warmup_thread
 
     try:
         # Update logging configs
         set_uvicorn_logging_configs()
-
+
         # Listen for HTTP requests
-        if server_args.tokenizer_worker_num …
+        if server_args.tokenizer_worker_num == 1:
+            uvicorn.run(
+                app,
+                host=server_args.host,
+                port=server_args.port,
+                log_level=server_args.log_level_http or server_args.log_level,
+                timeout_keep_alive=5,
+                loop="uvloop",
+            )
+        else:
             from uvicorn.config import LOGGING_CONFIG
 
             LOGGING_CONFIG["loggers"]["sglang.srt.entrypoints.http_server"] = {
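The removed branch used to spawn the warmup thread inline and stash it as `app.warmup_thread`; the new code only records `app.warmup_thread_args`, deferring thread creation to the app's lifespan. A hedged, self-contained sketch of that pattern (the real lifespan lives elsewhere in http_server.py and may differ in detail):

```python
import threading
from contextlib import asynccontextmanager
from fastapi import FastAPI

def _wait_and_warmup(server_args, pipe_finish_writer, launch_callback):
    """Stand-in for the module's warmup routine, which takes this 3-tuple."""

@asynccontextmanager
async def lifespan(app: FastAPI):
    # launch_server stashed the argument tuple on the app object;
    # the lifespan spawns the warmup thread after startup.
    thread = threading.Thread(target=_wait_and_warmup, args=app.warmup_thread_args)
    thread.start()
    yield
    thread.join()

app = FastAPI(lifespan=lifespan)
app.warmup_thread_args = (None, None, None)  # stand-in values for the sketch
```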
@@ -1310,7 +1393,6 @@ def launch_server(
                 "level": "INFO",
                 "propagate": False,
             }
-
             monkey_patch_uvicorn_multiprocessing()
 
             uvicorn.run(
@@ -1322,22 +1404,10 @@ def launch_server(
                 loop="uvloop",
                 workers=server_args.tokenizer_worker_num,
             )
-        else:
-            app.is_single_tokenizer_mode = True
-            uvicorn.run(
-                app,
-                host=server_args.host,
-                port=server_args.port,
-                log_level=server_args.log_level_http or server_args.log_level,
-                timeout_keep_alive=5,
-                loop="uvloop",
-            )
     finally:
         if server_args.tokenizer_worker_num > 1:
             multi_tokenizer_args_shm.unlink()
             _global_state.tokenizer_manager.socket_mapping.clear_all_sockets()
-        else:
-            warmup_thread.join()
 
 
 def _execute_server_warmup(
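The `multi_tokenizer_args_shm.unlink()` cleanup above pairs with `write_data_for_multi_tokenizer` earlier in the function. Below is a generic sketch of that shared-memory handoff pattern, not sglang's exact implementation: the parent serializes the launch arguments once, and worker processes attach to the segment by name instead of re-deriving them:

```python
import pickle
from multiprocessing import shared_memory

# Parent: serialize a stand-in argument payload into a named segment.
args = {"port": 30000}
blob = pickle.dumps(args)
shm = shared_memory.SharedMemory(create=True, size=len(blob))
shm.buf[: len(blob)] = blob

# Worker: attach by name and deserialize (pickle ignores trailing padding).
view = shared_memory.SharedMemory(name=shm.name)
restored = pickle.loads(bytes(view.buf))
assert restored["port"] == 30000
view.close()

# Parent cleanup, mirroring the finally block's unlink().
shm.close()
shm.unlink()
```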
@@ -1464,6 +1534,8 @@ def _wait_and_warmup(
     pipe_finish_writer: Optional[multiprocessing.connection.Connection],
     launch_callback: Optional[Callable[[], None]] = None,
 ):
+    if server_args.checkpoint_engine_wait_weights_before_ready:
+        _wait_weights_ready()
     if not server_args.skip_server_warmup:
         if not _execute_server_warmup(
             server_args,
@@ -1486,3 +1558,24 @@ def _wait_and_warmup(
 
     if launch_callback is not None:
         launch_callback()
+
+
+def _wait_weights_ready():
+    """Wait for weights to be ready within the specified timeout."""
+    timeout = WAIT_WEIGHTS_READY_TIMEOUT
+    start_time = time.time()
+
+    for _ in range(timeout):
+        if _global_state.tokenizer_manager.initial_weights_loaded:
+            logger.info(
+                f"Weights are ready after {time.time() - start_time:.2f} seconds"
+            )
+            return
+        time.sleep(1)
+
+    # Timeout reached without weights being ready
+    logger.error(
+        f"Weights are not ready after waiting {timeout} seconds. "
+        f"Consider increasing SGLANG_WAIT_WEIGHTS_READY_TIMEOUT environment variable. "
+        f"Current status: initial_weights_loaded={_global_state.tokenizer_manager.initial_weights_loaded}"
+    )
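Taken together with the `/update_weights_from_ipc` handler earlier in the file, the intended startup sequence looks like the following hedged sketch; the address and payload are assumptions, and the checkpoint engine's actual weight transfer happens out of band over IPC:

```python
import requests

# 1. The server is launched with checkpoint_engine_wait_weights_before_ready
#    enabled, so _wait_and_warmup blocks in _wait_weights_ready(), polling
#    initial_weights_loaded once per second up to WAIT_WEIGHTS_READY_TIMEOUT.
# 2. An external checkpoint engine transfers weights over IPC, then calls:
resp = requests.post(
    "http://localhost:30000/update_weights_from_ipc",  # assumed address
    json={"weight_version": "v1"},
)
# 3. On success the handler flips initial_weights_loaded to True,
#    _wait_weights_ready() returns, and server warmup proceeds.
assert resp.ok
```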
@@ -1,15 +1,9 @@
-import copy
-import dataclasses
 import multiprocessing
-import pickle
-import threading
 import time
-from typing import …
+from typing import List, Optional, Tuple
 
-import pybase64
 import requests
 import torch
-import torch.distributed as dist
 
 from sglang.srt.entrypoints.EngineBase import EngineBase
 from sglang.srt.entrypoints.http_server import launch_server