sglang 0.5.2rc2__py3-none-any.whl → 0.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (377)
  1. sglang/bench_one_batch.py +7 -9
  2. sglang/bench_one_batch_server.py +330 -31
  3. sglang/bench_serving.py +267 -32
  4. sglang/global_config.py +2 -2
  5. sglang/lang/backend/runtime_endpoint.py +1 -1
  6. sglang/launch_server.py +14 -0
  7. sglang/profiler.py +2 -2
  8. sglang/srt/batch_invariant_ops/__init__.py +27 -0
  9. sglang/srt/batch_invariant_ops/batch_invariant_ops.py +549 -0
  10. sglang/srt/configs/__init__.py +8 -0
  11. sglang/srt/configs/device_config.py +3 -1
  12. sglang/srt/configs/dots_ocr.py +64 -0
  13. sglang/srt/configs/dots_vlm.py +139 -0
  14. sglang/srt/configs/falcon_h1.py +360 -0
  15. sglang/srt/configs/load_config.py +9 -0
  16. sglang/srt/configs/model_config.py +181 -82
  17. sglang/srt/configs/qwen3_next.py +326 -0
  18. sglang/srt/configs/qwen3_vl.py +586 -0
  19. sglang/srt/connector/__init__.py +8 -1
  20. sglang/srt/connector/remote_instance.py +82 -0
  21. sglang/srt/constrained/base_grammar_backend.py +49 -12
  22. sglang/srt/constrained/llguidance_backend.py +0 -1
  23. sglang/srt/constrained/outlines_backend.py +0 -1
  24. sglang/srt/constrained/outlines_jump_forward.py +1 -1
  25. sglang/srt/constrained/xgrammar_backend.py +30 -9
  26. sglang/srt/custom_op.py +11 -1
  27. sglang/srt/debug_utils/dump_comparator.py +81 -44
  28. sglang/srt/debug_utils/dump_loader.py +97 -0
  29. sglang/srt/debug_utils/dumper.py +21 -6
  30. sglang/srt/debug_utils/text_comparator.py +73 -11
  31. sglang/srt/disaggregation/ascend/conn.py +2 -2
  32. sglang/srt/disaggregation/ascend/transfer_engine.py +47 -9
  33. sglang/srt/disaggregation/base/conn.py +1 -1
  34. sglang/srt/disaggregation/common/conn.py +279 -108
  35. sglang/srt/disaggregation/decode.py +71 -19
  36. sglang/srt/disaggregation/decode_kvcache_offload_manager.py +185 -0
  37. sglang/srt/disaggregation/decode_schedule_batch_mixin.py +29 -17
  38. sglang/srt/disaggregation/fake/conn.py +1 -1
  39. sglang/srt/disaggregation/mini_lb.py +6 -445
  40. sglang/srt/disaggregation/mooncake/conn.py +55 -537
  41. sglang/srt/disaggregation/nixl/conn.py +326 -53
  42. sglang/srt/disaggregation/prefill.py +36 -17
  43. sglang/srt/disaggregation/utils.py +40 -54
  44. sglang/srt/distributed/device_communicators/all_reduce_utils.py +16 -0
  45. sglang/srt/distributed/device_communicators/shm_broadcast.py +4 -2
  46. sglang/srt/distributed/device_communicators/symm_mem.py +164 -0
  47. sglang/srt/distributed/parallel_state.py +156 -80
  48. sglang/srt/entrypoints/engine.py +59 -18
  49. sglang/srt/entrypoints/grpc_request_manager.py +855 -0
  50. sglang/srt/entrypoints/grpc_server.py +810 -0
  51. sglang/srt/entrypoints/http_server.py +130 -59
  52. sglang/srt/entrypoints/openai/protocol.py +112 -4
  53. sglang/srt/entrypoints/openai/serving_base.py +65 -3
  54. sglang/srt/entrypoints/openai/serving_chat.py +204 -55
  55. sglang/srt/entrypoints/openai/serving_completions.py +14 -3
  56. sglang/srt/entrypoints/openai/serving_embedding.py +9 -3
  57. sglang/srt/entrypoints/openai/serving_rerank.py +3 -1
  58. sglang/srt/entrypoints/openai/serving_responses.py +48 -3
  59. sglang/srt/entrypoints/openai/serving_score.py +1 -0
  60. sglang/srt/environ.py +285 -0
  61. sglang/srt/eplb/eplb_manager.py +2 -2
  62. sglang/srt/eplb/expert_distribution.py +26 -13
  63. sglang/srt/eplb/expert_location.py +38 -8
  64. sglang/srt/eplb/expert_location_updater.py +1 -1
  65. sglang/srt/function_call/base_format_detector.py +3 -6
  66. sglang/srt/function_call/ebnf_composer.py +11 -9
  67. sglang/srt/function_call/function_call_parser.py +9 -2
  68. sglang/srt/function_call/glm4_moe_detector.py +4 -4
  69. sglang/srt/function_call/gpt_oss_detector.py +23 -0
  70. sglang/srt/function_call/json_array_parser.py +63 -0
  71. sglang/srt/function_call/kimik2_detector.py +17 -4
  72. sglang/srt/function_call/qwen3_coder_detector.py +1 -1
  73. sglang/srt/function_call/utils.py +96 -5
  74. sglang/srt/grpc/__init__.py +1 -0
  75. sglang/srt/grpc/compile_proto.py +245 -0
  76. sglang/srt/grpc/sglang_scheduler_pb2.py +111 -0
  77. sglang/srt/grpc/sglang_scheduler_pb2.pyi +434 -0
  78. sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +239 -0
  79. sglang/srt/layers/activation.py +143 -9
  80. sglang/srt/layers/attention/aiter_backend.py +14 -15
  81. sglang/srt/layers/attention/ascend_backend.py +115 -9
  82. sglang/srt/layers/attention/attention_registry.py +206 -0
  83. sglang/srt/layers/attention/base_attn_backend.py +12 -3
  84. sglang/srt/layers/attention/cutlass_mla_backend.py +3 -3
  85. sglang/srt/layers/attention/dual_chunk_flashattention_backend.py +1 -1
  86. sglang/srt/layers/attention/fla/chunk.py +242 -0
  87. sglang/srt/layers/attention/fla/chunk_delta_h.py +314 -0
  88. sglang/srt/layers/attention/fla/chunk_o.py +178 -0
  89. sglang/srt/layers/attention/fla/chunk_scaled_dot_kkt.py +151 -0
  90. sglang/srt/layers/attention/fla/cumsum.py +300 -0
  91. sglang/srt/layers/attention/fla/fused_recurrent.py +640 -0
  92. sglang/srt/layers/attention/fla/fused_sigmoid_gating_recurrent.py +232 -0
  93. sglang/srt/layers/attention/fla/index.py +37 -0
  94. sglang/srt/layers/attention/fla/l2norm.py +150 -0
  95. sglang/srt/layers/attention/fla/layernorm_gated.py +326 -0
  96. sglang/srt/layers/attention/fla/op.py +66 -0
  97. sglang/srt/layers/attention/fla/solve_tril.py +465 -0
  98. sglang/srt/layers/attention/fla/utils.py +331 -0
  99. sglang/srt/layers/attention/fla/wy_fast.py +158 -0
  100. sglang/srt/layers/attention/flashattention_backend.py +41 -8
  101. sglang/srt/layers/attention/flashinfer_backend.py +118 -198
  102. sglang/srt/layers/attention/flashinfer_mla_backend.py +27 -27
  103. sglang/srt/layers/attention/flashmla_backend.py +7 -5
  104. sglang/srt/layers/attention/hybrid_attn_backend.py +68 -53
  105. sglang/srt/layers/attention/hybrid_linear_attn_backend.py +602 -0
  106. sglang/srt/layers/attention/intel_amx_backend.py +3 -0
  107. sglang/srt/layers/attention/mamba/causal_conv1d.py +129 -0
  108. sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +969 -0
  109. sglang/srt/layers/attention/mamba/mamba.py +629 -0
  110. sglang/srt/layers/attention/mamba/mamba_utils.py +81 -0
  111. sglang/srt/layers/attention/mamba/ops/__init__.py +2 -0
  112. sglang/srt/layers/attention/mamba/ops/layernorm_gated.py +172 -0
  113. sglang/srt/layers/attention/mamba/ops/mamba_ssm.py +442 -0
  114. sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +264 -0
  115. sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +622 -0
  116. sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +757 -0
  117. sglang/srt/layers/attention/mamba/ops/ssd_combined.py +262 -0
  118. sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +275 -0
  119. sglang/srt/layers/attention/npu_ops/mla_preprocess.py +393 -0
  120. sglang/srt/layers/attention/nsa/dequant_k_cache.py +163 -0
  121. sglang/srt/layers/attention/nsa/index_buf_accessor.py +354 -0
  122. sglang/srt/layers/attention/nsa/nsa_indexer.py +761 -0
  123. sglang/srt/layers/attention/nsa/quant_k_cache.py +255 -0
  124. sglang/srt/layers/attention/nsa/tilelang_kernel.py +785 -0
  125. sglang/srt/layers/attention/nsa/transform_index.py +144 -0
  126. sglang/srt/layers/attention/nsa/utils.py +24 -0
  127. sglang/srt/layers/attention/nsa_backend.py +887 -0
  128. sglang/srt/layers/attention/tbo_backend.py +6 -6
  129. sglang/srt/layers/attention/torch_flex_backend.py +325 -0
  130. sglang/srt/layers/attention/torch_native_backend.py +12 -6
  131. sglang/srt/layers/attention/triton_backend.py +57 -7
  132. sglang/srt/layers/attention/trtllm_mha_backend.py +5 -7
  133. sglang/srt/layers/attention/trtllm_mla_backend.py +276 -39
  134. sglang/srt/layers/attention/vision.py +58 -0
  135. sglang/srt/layers/attention/wave_backend.py +4 -4
  136. sglang/srt/layers/attention/wave_ops/decode_attention.py +2 -4
  137. sglang/srt/layers/attention/wave_ops/extend_attention.py +1 -3
  138. sglang/srt/layers/communicator.py +8 -0
  139. sglang/srt/layers/dp_attention.py +41 -2
  140. sglang/srt/layers/elementwise.py +3 -1
  141. sglang/srt/layers/layernorm.py +34 -15
  142. sglang/srt/layers/linear.py +55 -7
  143. sglang/srt/layers/logits_processor.py +44 -12
  144. sglang/srt/layers/moe/__init__.py +2 -1
  145. sglang/srt/layers/moe/cutlass_w4a8_moe.py +3 -3
  146. sglang/srt/layers/moe/ep_moe/kernels.py +2 -2
  147. sglang/srt/layers/moe/ep_moe/layer.py +256 -63
  148. sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +183 -0
  149. sglang/srt/layers/moe/fused_moe_native.py +5 -3
  150. sglang/srt/layers/moe/fused_moe_triton/configs/{triton_3_4_0/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → triton_3_3_1/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json } +35 -35
  151. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=352,device_name=NVIDIA_RTX_5880_Ada_Generation,dtype=fp8_w8a8.json +146 -0
  152. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
  153. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=512,device_name=NVIDIA_H20.json +146 -0
  154. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  155. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  156. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  157. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
  158. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  159. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  160. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  161. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=64,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  162. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  163. sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +5 -2
  164. sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +7 -3
  165. sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_kernels.py +23 -20
  166. sglang/srt/layers/moe/fused_moe_triton/layer.py +71 -70
  167. sglang/srt/layers/moe/moe_runner/__init__.py +2 -1
  168. sglang/srt/layers/moe/moe_runner/base.py +274 -1
  169. sglang/srt/layers/moe/moe_runner/runner.py +80 -0
  170. sglang/srt/layers/moe/moe_runner/triton.py +448 -0
  171. sglang/srt/layers/moe/token_dispatcher/__init__.py +16 -4
  172. sglang/srt/layers/moe/token_dispatcher/{base_dispatcher.py → base.py} +67 -17
  173. sglang/srt/layers/moe/token_dispatcher/deepep.py +118 -56
  174. sglang/srt/layers/moe/token_dispatcher/standard.py +44 -2
  175. sglang/srt/layers/moe/topk.py +30 -9
  176. sglang/srt/layers/moe/utils.py +22 -6
  177. sglang/srt/layers/parameter.py +23 -6
  178. sglang/srt/layers/quantization/awq.py +19 -7
  179. sglang/srt/layers/quantization/base_config.py +11 -6
  180. sglang/srt/layers/quantization/blockwise_int8.py +38 -27
  181. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +1 -0
  182. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +50 -30
  183. sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +2 -0
  184. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +13 -1
  185. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +173 -0
  186. sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +2 -10
  187. sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +27 -0
  188. sglang/srt/layers/quantization/fp8.py +78 -49
  189. sglang/srt/layers/quantization/fp8_utils.py +51 -32
  190. sglang/srt/layers/quantization/gptq.py +25 -17
  191. sglang/srt/layers/quantization/modelopt_quant.py +190 -55
  192. sglang/srt/layers/quantization/moe_wna16.py +21 -18
  193. sglang/srt/layers/quantization/mxfp4.py +74 -42
  194. sglang/srt/layers/quantization/quark/quark_moe.py +48 -30
  195. sglang/srt/layers/quantization/unquant.py +135 -47
  196. sglang/srt/layers/quantization/w4afp8.py +26 -17
  197. sglang/srt/layers/quantization/w8a8_fp8.py +35 -20
  198. sglang/srt/layers/quantization/w8a8_int8.py +91 -41
  199. sglang/srt/layers/rotary_embedding.py +78 -31
  200. sglang/srt/layers/sampler.py +213 -21
  201. sglang/srt/layers/utils.py +23 -0
  202. sglang/srt/lora/backend/base_backend.py +50 -8
  203. sglang/srt/lora/backend/chunked_backend.py +348 -0
  204. sglang/srt/lora/backend/triton_backend.py +99 -5
  205. sglang/srt/lora/layers.py +32 -0
  206. sglang/srt/lora/lora.py +8 -3
  207. sglang/srt/lora/lora_manager.py +52 -118
  208. sglang/srt/lora/mem_pool.py +25 -11
  209. sglang/srt/lora/triton_ops/__init__.py +4 -0
  210. sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +214 -0
  211. sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +174 -0
  212. sglang/srt/lora/utils.py +22 -11
  213. sglang/srt/managers/async_dynamic_batch_tokenizer.py +170 -0
  214. sglang/srt/managers/cache_controller.py +199 -301
  215. sglang/srt/managers/data_parallel_controller.py +115 -80
  216. sglang/srt/managers/detokenizer_manager.py +19 -15
  217. sglang/srt/managers/disagg_service.py +46 -0
  218. sglang/srt/managers/io_struct.py +340 -109
  219. sglang/srt/managers/mm_utils.py +44 -6
  220. sglang/srt/managers/multi_tokenizer_mixin.py +357 -407
  221. sglang/srt/managers/multimodal_processor.py +1 -2
  222. sglang/srt/managers/overlap_utils.py +53 -0
  223. sglang/srt/managers/schedule_batch.py +240 -138
  224. sglang/srt/managers/schedule_policy.py +144 -17
  225. sglang/srt/managers/scheduler.py +502 -209
  226. sglang/srt/managers/scheduler_input_blocker.py +1 -1
  227. sglang/srt/managers/scheduler_metrics_mixin.py +99 -126
  228. sglang/srt/managers/scheduler_output_processor_mixin.py +75 -22
  229. sglang/srt/managers/scheduler_profiler_mixin.py +6 -6
  230. sglang/srt/managers/scheduler_update_weights_mixin.py +7 -0
  231. sglang/srt/managers/tokenizer_communicator_mixin.py +675 -0
  232. sglang/srt/managers/tokenizer_manager.py +320 -632
  233. sglang/srt/managers/tp_worker.py +81 -22
  234. sglang/srt/managers/tp_worker_overlap_thread.py +71 -56
  235. sglang/srt/managers/utils.py +1 -45
  236. sglang/srt/mem_cache/allocator.py +14 -20
  237. sglang/srt/mem_cache/allocator_ascend.py +41 -27
  238. sglang/srt/mem_cache/base_prefix_cache.py +1 -1
  239. sglang/srt/mem_cache/chunk_cache.py +8 -1
  240. sglang/srt/mem_cache/evict_policy.py +23 -0
  241. sglang/srt/mem_cache/hicache_storage.py +43 -24
  242. sglang/srt/mem_cache/hiradix_cache.py +222 -75
  243. sglang/srt/mem_cache/memory_pool.py +535 -58
  244. sglang/srt/mem_cache/memory_pool_host.py +239 -228
  245. sglang/srt/mem_cache/radix_cache.py +222 -73
  246. sglang/srt/mem_cache/radix_cache_cpp.py +11 -8
  247. sglang/srt/mem_cache/storage/__init__.py +10 -0
  248. sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +151 -0
  249. sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +109 -0
  250. sglang/srt/mem_cache/storage/backend_factory.py +223 -0
  251. sglang/srt/mem_cache/storage/eic/eic_storage.py +778 -0
  252. sglang/srt/mem_cache/storage/eic/test_unit.py +115 -0
  253. sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +164 -0
  254. sglang/srt/mem_cache/storage/hf3fs/{client_hf3fs.py → hf3fs_usrbio_client.py} +5 -1
  255. sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +259 -62
  256. sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +284 -0
  257. sglang/srt/mem_cache/storage/lmcache/unit_test.py +121 -0
  258. sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +166 -17
  259. sglang/srt/mem_cache/swa_radix_cache.py +25 -36
  260. sglang/srt/metrics/collector.py +511 -132
  261. sglang/srt/metrics/func_timer.py +2 -7
  262. sglang/srt/metrics/startup_func_log_and_timer.py +150 -0
  263. sglang/srt/metrics/utils.py +8 -1
  264. sglang/srt/model_executor/cpu_graph_runner.py +640 -0
  265. sglang/srt/model_executor/cuda_graph_runner.py +52 -37
  266. sglang/srt/model_executor/forward_batch_info.py +82 -40
  267. sglang/srt/model_executor/model_runner.py +432 -157
  268. sglang/srt/model_executor/npu_graph_runner.py +12 -5
  269. sglang/srt/model_loader/__init__.py +9 -3
  270. sglang/srt/model_loader/loader.py +133 -5
  271. sglang/srt/model_loader/remote_instance_weight_loader_utils.py +69 -0
  272. sglang/srt/model_loader/weight_utils.py +158 -3
  273. sglang/srt/models/apertus.py +686 -0
  274. sglang/srt/models/bailing_moe.py +820 -217
  275. sglang/srt/models/bailing_moe_nextn.py +168 -0
  276. sglang/srt/models/deepseek_nextn.py +6 -1
  277. sglang/srt/models/deepseek_v2.py +607 -130
  278. sglang/srt/models/dots_ocr.py +173 -0
  279. sglang/srt/models/dots_vlm.py +174 -0
  280. sglang/srt/models/dots_vlm_vit.py +337 -0
  281. sglang/srt/models/ernie4.py +1 -1
  282. sglang/srt/models/falcon_h1.py +576 -0
  283. sglang/srt/models/gemma3_causal.py +0 -2
  284. sglang/srt/models/gemma3_mm.py +1 -1
  285. sglang/srt/models/gemma3n_mm.py +2 -2
  286. sglang/srt/models/glm4_moe.py +4 -4
  287. sglang/srt/models/glm4_moe_nextn.py +2 -2
  288. sglang/srt/models/glm4v.py +5 -3
  289. sglang/srt/models/glm4v_moe.py +4 -1
  290. sglang/srt/models/gpt_oss.py +8 -31
  291. sglang/srt/models/kimi_vl_moonvit.py +2 -2
  292. sglang/srt/models/llama.py +4 -0
  293. sglang/srt/models/llama4.py +9 -0
  294. sglang/srt/models/llama_eagle3.py +13 -0
  295. sglang/srt/models/longcat_flash.py +3 -3
  296. sglang/srt/models/longcat_flash_nextn.py +1 -1
  297. sglang/srt/models/mllama4.py +40 -4
  298. sglang/srt/models/opt.py +637 -0
  299. sglang/srt/models/qwen2_5_vl.py +29 -5
  300. sglang/srt/models/qwen2_audio.py +1 -1
  301. sglang/srt/models/qwen2_moe.py +120 -13
  302. sglang/srt/models/qwen2_vl.py +1 -1
  303. sglang/srt/models/qwen3.py +18 -3
  304. sglang/srt/models/qwen3_moe.py +32 -4
  305. sglang/srt/models/qwen3_next.py +1069 -0
  306. sglang/srt/models/qwen3_next_mtp.py +112 -0
  307. sglang/srt/models/qwen3_vl.py +787 -0
  308. sglang/srt/models/qwen3_vl_moe.py +471 -0
  309. sglang/srt/models/registry.py +15 -3
  310. sglang/srt/models/sarashina2_vision.py +269 -0
  311. sglang/srt/models/solar.py +505 -0
  312. sglang/srt/models/starcoder2.py +357 -0
  313. sglang/srt/models/step3_vl.py +1 -1
  314. sglang/srt/models/torch_native_llama.py +9 -2
  315. sglang/srt/models/utils.py +51 -0
  316. sglang/srt/multimodal/processors/base_processor.py +15 -7
  317. sglang/srt/multimodal/processors/dots_vlm.py +98 -0
  318. sglang/srt/multimodal/processors/glm4v.py +9 -9
  319. sglang/srt/multimodal/processors/internvl.py +153 -129
  320. sglang/srt/multimodal/processors/qwen_vl.py +23 -6
  321. sglang/srt/multimodal/processors/sarashina2_vision.py +81 -0
  322. sglang/srt/offloader.py +27 -3
  323. sglang/srt/parser/jinja_template_utils.py +6 -0
  324. sglang/srt/sampling/sampling_batch_info.py +38 -17
  325. sglang/srt/sampling/sampling_params.py +7 -0
  326. sglang/srt/server_args.py +966 -267
  327. sglang/srt/server_args_config_parser.py +146 -0
  328. sglang/srt/single_batch_overlap.py +151 -0
  329. sglang/srt/speculative/cpp_ngram/ngram.cpp +374 -0
  330. sglang/srt/speculative/cpp_ngram/ngram.h +110 -0
  331. sglang/srt/speculative/cpp_ngram/ngram_cache.py +138 -0
  332. sglang/srt/speculative/cpp_ngram/ngram_cache_binding.cpp +43 -0
  333. sglang/srt/speculative/cpp_ngram/param.h +125 -0
  334. sglang/srt/speculative/cpp_ngram/queue.h +71 -0
  335. sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +7 -1
  336. sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +13 -2
  337. sglang/srt/speculative/{eagle_utils.py → eagle_info.py} +207 -757
  338. sglang/srt/speculative/eagle_worker.py +99 -28
  339. sglang/srt/speculative/ngram_utils.py +428 -0
  340. sglang/srt/speculative/ngram_worker.py +245 -0
  341. sglang/srt/speculative/spec_info.py +52 -0
  342. sglang/srt/speculative/spec_utils.py +606 -0
  343. sglang/srt/speculative/standalone_worker.py +109 -0
  344. sglang/srt/torch_memory_saver_adapter.py +5 -7
  345. sglang/srt/tracing/trace.py +578 -0
  346. sglang/srt/two_batch_overlap.py +8 -5
  347. sglang/srt/utils/__init__.py +2 -0
  348. sglang/srt/{utils.py → utils/common.py} +433 -77
  349. sglang/srt/{hf_transformers_utils.py → utils/hf_transformers_utils.py} +53 -5
  350. sglang/srt/{patch_torch.py → utils/patch_torch.py} +8 -0
  351. sglang/srt/utils/rpd_utils.py +452 -0
  352. sglang/srt/utils/slow_rank_detector.py +71 -0
  353. sglang/srt/warmup.py +8 -4
  354. sglang/srt/weight_sync/utils.py +2 -2
  355. sglang/test/attention/test_trtllm_mla_backend.py +169 -5
  356. sglang/test/get_logits_ut.py +57 -0
  357. sglang/test/run_eval.py +79 -11
  358. sglang/test/runners.py +5 -1
  359. sglang/test/simple_eval_common.py +5 -2
  360. sglang/test/simple_eval_mmmu_vlm.py +441 -0
  361. sglang/test/test_block_fp8.py +2 -2
  362. sglang/test/test_cutlass_moe.py +24 -6
  363. sglang/test/test_deterministic.py +297 -0
  364. sglang/test/test_disaggregation_utils.py +77 -0
  365. sglang/test/test_fp4_moe.py +370 -1
  366. sglang/test/test_programs.py +1 -1
  367. sglang/test/test_utils.py +383 -5
  368. sglang/utils.py +21 -1
  369. sglang/version.py +1 -1
  370. {sglang-0.5.2rc2.dist-info → sglang-0.5.3.dist-info}/METADATA +69 -124
  371. {sglang-0.5.2rc2.dist-info → sglang-0.5.3.dist-info}/RECORD +375 -245
  372. sglang/srt/disaggregation/launch_lb.py +0 -118
  373. sglang/srt/mem_cache/lora_radix_cache.py +0 -421
  374. /sglang/srt/{poll_based_barrier.py → utils/poll_based_barrier.py} +0 -0
  375. {sglang-0.5.2rc2.dist-info → sglang-0.5.3.dist-info}/WHEEL +0 -0
  376. {sglang-0.5.2rc2.dist-info → sglang-0.5.3.dist-info}/licenses/LICENSE +0 -0
  377. {sglang-0.5.2rc2.dist-info → sglang-0.5.3.dist-info}/top_level.txt +0 -0
sglang/srt/server_args.py CHANGED
@@ -19,12 +19,11 @@ import json
 import logging
 import os
 import random
-import sys
 import tempfile
 from typing import List, Literal, Optional, Union

+from sglang.srt.connector import ConnectorType
 from sglang.srt.function_call.function_call_parser import FunctionCallParser
-from sglang.srt.hf_transformers_utils import check_gguf_file, get_config
 from sglang.srt.lora.lora_registry import LoRARef
 from sglang.srt.parser.reasoning_parser import ReasoningParser
 from sglang.srt.utils import (
@@ -36,18 +35,22 @@ from sglang.srt.utils import (
     is_cuda,
     is_flashinfer_available,
     is_hip,
+    is_npu,
     is_port_available,
     is_remote_url,
     is_sm90_supported,
     is_sm100_supported,
     is_triton_kernels_available,
     is_valid_ipv6_address,
+    json_list_type,
     nullable_str,
+    parse_connector_type,
 )
+from sglang.srt.utils.hf_transformers_utils import check_gguf_file, get_config
+from sglang.utils import is_in_ci

 logger = logging.getLogger(__name__)

-
 # Define constants
 LOAD_FORMAT_CHOICES = [
     "auto",
@@ -60,6 +63,7 @@ LOAD_FORMAT_CHOICES = [
     "bitsandbytes",
     "layered",
     "remote",
+    "remote_instance",
 ]

 QUANTIZATION_CHOICES = [
@@ -86,9 +90,12 @@ ATTENTION_BACKEND_CHOICES = [
     # Common
     "triton",
     "torch_native",
+    "flex_attention",
+    "nsa",
     # NVIDIA specific
     "cutlass_mla",
     "fa3",
+    "fa4",
     "flashinfer",
     "flashmla",
     "trtllm_mla",
@@ -102,8 +109,18 @@ ATTENTION_BACKEND_CHOICES = [
     "ascend",
 ]

+LORA_BACKEND_CHOICES = ["triton", "csgmv"]
+
 DISAGG_TRANSFER_BACKEND_CHOICES = ["mooncake", "nixl", "ascend", "fake"]

+GRAMMAR_BACKEND_CHOICES = ["xgrammar", "outlines", "llguidance", "none"]
+
+DETERMINISTIC_ATTENTION_BACKEND_CHOICES = ["flashinfer", "fa3", "triton"]
+
+NSA_CHOICES = ["flashmla_prefill", "flashmla_decode", "fa3", "tilelang", "aiter"]
+
+RADIX_EVICTION_POLICY_CHOICES = ["lru", "lfu"]
+


 # Allow external code to add more choices
@@ -122,6 +139,18 @@ def add_disagg_transfer_backend_choices(choices):
     DISAGG_TRANSFER_BACKEND_CHOICES.extend(choices)


+def add_grammar_backend_choices(choices):
+    GRAMMAR_BACKEND_CHOICES.extend(choices)
+
+
+def add_deterministic_attention_backend_choices(choices):
+    DETERMINISTIC_ATTENTION_BACKEND_CHOICES.extend(choices)
+
+
+def add_radix_eviction_policy_choices(choices):
+    RADIX_EVICTION_POLICY_CHOICES.extend(choices)
+
+
 @dataclasses.dataclass
 class ServerArgs:
     # Model and tokenizer
@@ -151,20 +180,25 @@ class ServerArgs:
     quantization: Optional[str] = None
     quantization_param_path: Optional[str] = None
     kv_cache_dtype: str = "auto"
+    enable_fp32_lm_head: bool = False

     # Memory and scheduling
     mem_fraction_static: Optional[float] = None
     max_running_requests: Optional[int] = None
-    max_queued_requests: Optional[int] = sys.maxsize
+    max_queued_requests: Optional[int] = None
     max_total_tokens: Optional[int] = None
     chunked_prefill_size: Optional[int] = None
     max_prefill_tokens: int = 16384
     schedule_policy: str = "fcfs"
+    enable_priority_scheduling: bool = False
+    schedule_low_priority_values_first: bool = False
+    priority_scheduling_preemption_threshold: int = 10
     schedule_conservativeness: float = 1.0
     page_size: Optional[int] = None
     hybrid_kvcache_ratio: Optional[float] = None
     swa_full_tokens_ratio: float = 0.8
     disable_hybrid_swa_memory: bool = False
+    radix_eviction_policy: str = "lru"

     # Runtime options
     device: Optional[str] = None
@@ -191,6 +225,8 @@ class ServerArgs:
     show_time_cost: bool = False
     enable_metrics: bool = False
     enable_metrics_for_all_schedulers: bool = False
+    tokenizer_metrics_custom_labels_header: str = "x-custom-labels"
+    tokenizer_metrics_allowed_custom_labels: Optional[List[str]] = None
     bucket_time_to_first_token: Optional[List[float]] = None
     bucket_inter_token_latency: Optional[List[float]] = None
     bucket_e2e_request_latency: Optional[List[float]] = None
@@ -201,6 +237,8 @@ class ServerArgs:
     enable_request_time_stats_logging: bool = False
     kv_events_config: Optional[str] = None
     gc_warning_threshold_secs: float = 0.0
+    enable_trace: bool = False
+    oltp_traces_endpoint: str = "localhost:4317"

     # API related
     api_key: Optional[str] = None
@@ -217,6 +255,9 @@ class ServerArgs:
     # Data parallelism
     dp_size: int = 1
     load_balance_method: str = "round_robin"
+    load_watch_interval: float = 0.1
+    # FIXME: remove this after dp rank scheduling is fully supported with PD-Disaggregation
+    prefill_round_robin_balance: bool = False

     # Multi-node distributed serving
     dist_init_addr: Optional[str] = None
@@ -237,6 +278,7 @@ class ServerArgs:
     max_loaded_loras: Optional[int] = None
     max_loras_per_batch: int = 8
     lora_backend: str = "triton"
+    max_lora_chunk_size: Optional[int] = 16

     # Kernel backend
     attention_backend: Optional[str] = None
@@ -245,16 +287,28 @@ class ServerArgs:
     sampling_backend: Optional[str] = None
     grammar_backend: Optional[str] = None
     mm_attention_backend: Optional[str] = None
+    nsa_prefill: str = "flashmla_prefill"
+    nsa_decode: str = "fa3"

     # Speculative decoding
     speculative_algorithm: Optional[str] = None
     speculative_draft_model_path: Optional[str] = None
+    speculative_draft_model_revision: Optional[str] = None
     speculative_num_steps: Optional[int] = None
     speculative_eagle_topk: Optional[int] = None
     speculative_num_draft_tokens: Optional[int] = None
     speculative_accept_threshold_single: float = 1.0
     speculative_accept_threshold_acc: float = 1.0
     speculative_token_map: Optional[str] = None
+    speculative_attention_mode: str = "prefill"
+    # For ngram only
+    speculative_ngram_min_match_window_size: int = 1
+    speculative_ngram_max_match_window_size: int = 12
+    speculative_ngram_min_bfs_breadth: int = 1
+    speculative_ngram_max_bfs_breadth: int = 10
+    speculative_ngram_match_type: Literal["BFS", "PROB"] = "BFS"
+    speculative_ngram_branch_length: int = 18
+    speculative_ngram_capacity: int = 10 * 1000 * 1000

     # Expert parallelism
     ep_size: int = 1
@@ -286,6 +340,10 @@ class ServerArgs:
     deepep_config: Optional[str] = None
     moe_dense_tp_size: Optional[int] = None

+    # Mamba cache
+    max_mamba_cache_size: Optional[int] = None
+    mamba_ssm_dtype: str = "float32"
+
     # Hierarchical cache
     enable_hierarchical_cache: bool = False
     hicache_ratio: float = 2.0
@@ -296,6 +354,8 @@ class ServerArgs:
     hicache_storage_backend: Optional[str] = None
     hicache_storage_prefetch_policy: str = "best_effort"
     hicache_storage_backend_extra_config: Optional[str] = None
+    # LMCache
+    enable_lmcache: bool = False

     # Double Sparsity
     enable_double_sparsity: bool = False
@@ -327,11 +387,13 @@ class ServerArgs:
     disable_outlines_disk_cache: bool = False
     disable_custom_all_reduce: bool = False
     enable_mscclpp: bool = False
+    enable_torch_symm_mem: bool = False
     disable_overlap_schedule: bool = False
     enable_mixed_chunk: bool = False
     enable_dp_attention: bool = False
     enable_dp_lm_head: bool = False
     enable_two_batch_overlap: bool = False
+    enable_single_batch_overlap: bool = False
     tbo_token_distribution_threshold: float = 0.48
     enable_torch_compile: bool = False
     torch_compile_max_bs: int = 32
@@ -340,17 +402,27 @@ class ServerArgs:
     enable_p2p_check: bool = False
     triton_attention_reduce_in_fp32: bool = False
     triton_attention_num_kv_splits: int = 8
+    triton_attention_split_tile_size: Optional[int] = None
     num_continuous_decode_steps: int = 1
     delete_ckpt_after_loading: bool = False
     enable_memory_saver: bool = False
+    enable_weights_cpu_backup: bool = False
     allow_auto_truncate: bool = False
     enable_custom_logit_processor: bool = False
     flashinfer_mla_disable_ragged: bool = False
     disable_shared_experts_fusion: bool = False
     disable_chunked_prefix_cache: bool = False
     disable_fast_image_processor: bool = False
+    keep_mm_feature_on_device: bool = False
     enable_return_hidden_states: bool = False
     scheduler_recv_interval: int = 1
+    numa_node: Optional[List[int]] = None
+    enable_deterministic_inference: bool = False
+
+    # Dynamic batch tokenizer
+    enable_dynamic_batch_tokenizer: bool = False
+    dynamic_batch_tokenizer_batch_size: int = 32
+    dynamic_batch_tokenizer_batch_timeout: float = 0.002

     # Debug tensor dumps
     debug_tensor_dump_output_folder: Optional[str] = None
@@ -359,66 +431,105 @@ class ServerArgs:
     debug_tensor_dump_prefill_only: bool = False

     # PD disaggregation: can be "null" (not disaggregated), "prefill" (prefill-only), or "decode" (decode-only)
-    disaggregation_mode: str = "null"
+    disaggregation_mode: Literal["null", "prefill", "decode"] = "null"
     disaggregation_transfer_backend: str = "mooncake"
     disaggregation_bootstrap_port: int = 8998
     disaggregation_decode_tp: Optional[int] = None
     disaggregation_decode_dp: Optional[int] = None
     disaggregation_prefill_pp: Optional[int] = 1
     disaggregation_ib_device: Optional[str] = None
+    disaggregation_decode_enable_offload_kvcache: bool = False
     num_reserved_decode_tokens: int = 512  # used for decode kv cache offload in PD
-    pdlb_url: Optional[str] = None
+    # FIXME: hack to reduce ITL when decode bs is small
+    disaggregation_decode_polling_interval: int = 1

-    # For model weight update
+    # For model weight update and weight loading
     custom_weight_loader: Optional[List[str]] = None
     weight_loader_disable_mmap: bool = False
+    remote_instance_weight_loader_seed_instance_ip: Optional[str] = None
+    remote_instance_weight_loader_seed_instance_service_port: Optional[int] = None
+    remote_instance_weight_loader_send_weights_group_ports: Optional[List[int]] = None

     # For PD-Multiplexing
     enable_pdmux: bool = False
     sm_group_num: int = 3

-    # Deprecated arguments
-    enable_ep_moe: bool = False
-    enable_deepep_moe: bool = False
-    enable_flashinfer_cutlass_moe: bool = False
-    enable_flashinfer_trtllm_moe: bool = False
-    enable_triton_kernel_moe: bool = False
-    enable_flashinfer_mxfp4_moe: bool = False
-
     def __post_init__(self):
-        # Check deprecated arguments
-        if self.enable_ep_moe:
-            self.ep_size = self.tp_size
-            print_deprecated_warning(
-                "NOTE: --enable-ep-moe is deprecated. Please set `--ep-size` to the same value as `--tp-size` instead."
-            )
-        if self.enable_deepep_moe:
-            self.moe_a2a_backend = "deepep"
-            print_deprecated_warning(
-                "NOTE: --enable-deepep-moe is deprecated. Please set `--moe-a2a-backend` to 'deepep' instead."
-            )
-        if self.enable_triton_kernel_moe:
-            self.moe_runner_backend = "triton_kernel"
-            print_deprecated_warning(
-                "NOTE: --enable-triton-kernel-moe is deprecated. Please set `--moe-runner-backend` to 'triton_kernel' instead."
-            )
-        if self.enable_flashinfer_cutlass_moe:
-            self.moe_runner_backend = "flashinfer_cutlass"
-            print_deprecated_warning(
-                "NOTE: --enable-flashinfer-cutlass-moe is deprecated. Please set `--moe-runner-backend` to 'flashinfer_cutlass' instead."
-            )
-        if self.enable_flashinfer_trtllm_moe:
-            self.moe_runner_backend = "flashinfer_trtllm"
-            print_deprecated_warning(
-                "NOTE: --enable-flashinfer-trtllm-moe is deprecated. Please set `--moe-runner-backend` to 'flashinfer_trtllm' instead."
-            )
-        if self.enable_flashinfer_mxfp4_moe:
-            self.moe_runner_backend = "flashinfer_mxfp4"
-            print_deprecated_warning(
-                "NOTE: --enable-flashinfer-mxfp4-moe is deprecated. Please set `--moe-runner-backend` to 'flashinfer_mxfp4' instead."
-            )
+        """
+        Orchestrates the handling of various server arguments, ensuring proper configuration and validation.
+        """
+        # Handle deprecated arguments.
+        self._handle_deprecated_args()
+
+        # Set missing default values.
+        self._handle_missing_default_values()
+
+        # Get GPU memory capacity, which is a common dependency for several configuration steps.
+        gpu_mem = get_device_memory_capacity(self.device)
+
+        # Handle memory-related, chunked prefill, and CUDA graph batch size configurations.
+        self._handle_gpu_memory_settings(gpu_mem)
+
+        # Handle device-specific backends.
+        self._handle_hpu_backends()
+        self._handle_cpu_backends()
+
+        # Apply model-specific adjustments.
+        self._handle_model_specific_adjustments()
+
+        # Set kernel backends.
+        self._handle_sampling_backend()
+        self._handle_attention_backend_compatibility()
+        self._handle_page_size()
+        self._handle_amd_specifics()
+        self._handle_grammar_backend()
+
+        # Handle data parallelism.
+        self._handle_data_parallelism()

-        # Set missing default values
+        # Handle MoE configurations.
+        self._handle_moe_kernel_config()
+        self._handle_deepep_moe()
+        self._handle_eplb_and_dispatch()
+        self._handle_expert_distribution_metrics()
+
+        # Handle pipeline parallelism.
+        self._handle_pipeline_parallelism()
+
+        # Handle Hicache settings.
+        self._handle_hicache()
+
+        # Handle speculative decoding logic.
+        self._handle_speculative_decoding()
+
+        # Handle model loading format.
+        self._handle_load_format()
+
+        # Handle PD disaggregation.
+        self._handle_disaggregation()
+
+        # Validate tokenizer settings.
+        self._handle_tokenizer_batching()
+
+        # Propagate environment variables.
+        self._handle_environment_variables()
+
+        # Validate cache settings.
+        self._handle_cache_compatibility()
+
+        # Validate metrics labels.
+        self._handle_metrics_labels()
+
+        # Handle deterministic inference.
+        self._handle_deterministic_inference()
+
+        # Handle any other necessary validations.
+        self._handle_other_validations()
+
+    def _handle_deprecated_args(self):
+        pass
+
+    def _handle_missing_default_values(self):
         if self.tokenizer_path is None:
             self.tokenizer_path = self.model_path
         if self.served_model_name is None:
@@ -428,51 +539,140 @@ class ServerArgs:
         if self.random_seed is None:
             self.random_seed = random.randint(0, 1 << 30)

-        gpu_mem = get_device_memory_capacity(self.device)
+    def _handle_gpu_memory_settings(self, gpu_mem):
+        """
+        Configure GPU memory-dependent settings including
+        chunked_prefill_size, cuda_graph_max_bs, and mem_fraction_static.
+
+        Here are our heuristics:
+        - Set chunked_prefill_size and cuda_graph_max_bs based on the GPU memory capacity.
+          This is because GPUs with more memory are generally more powerful, we need to use a larger
+          chunked_prefill_size and a larger cuda_graph_max_bs to fully utilize the GPU.
+        - Then set mem_fraction_static based on chunked_prefill_size and cuda_graph_max_bs.
+
+        GPU memory capacity = model weights + KV cache pool + activations + cuda graph buffers
+
+        The argument mem_fraction_static is defined as (model weights + KV cache pool) / GPU memory capacity,
+        or equivalently, mem_fraction_static = (GPU memory capacity - activations - cuda graph buffers) / GPU memory capacity.
+
+        In order to compute mem_fraction_static, we need to estimate the size of activations and cuda graph buffers.
+        The activation memory is proportional to the chunked_prefill_size.
+        The cuda graph memory is proportional to the cuda_graph_max_bs.
+        We use reserved_mem = chunked_prefill_size * 1.5 + cuda_graph_max_bs * 2 to estimate the size of activations and cuda graph buffers in GB.
+        and set mem_fraction_static = (GPU memory capacity - reserved_mem) / GPU memory capacity.
+
+        The coefficient 1.5 is a heuristic value, in the future, we can do better estimation by looking at the model types, hidden sizes or even do a dummy run.
+        """
+        if gpu_mem is not None:
+            if gpu_mem < 20 * 1024:
+                # T4, 4080
+                # (chunked_prefill_size 2k, cuda_graph_max_bs 8)
+                if self.chunked_prefill_size is None:
+                    self.chunked_prefill_size = 2048
+                if self.cuda_graph_max_bs is None:
+                    self.cuda_graph_max_bs = 8
+            elif gpu_mem < 35 * 1024:
+                # A10, 4090, 5090
+                # (chunked_prefill_size 2k, cuda_graph_max_bs 16 if tp < 4 else 80)
+                if self.chunked_prefill_size is None:
+                    self.chunked_prefill_size = 2048
+                if self.cuda_graph_max_bs is None:
+                    # Based on detailed statistics, when serving TP1/TP2 models on lower-end GPUs with HBM < 35GB, you can either disable cuda graph or set `cuda_graph_max_bs` to a very small value to reduce the memory overhead of creating cuda graphs, with almost no impact on performance.
+                    # However, when serving models with TP4 or TP8, we need to enable cuda graph to maintain high performance. In this case, we can set `cuda_graph_max_bs` to 80 (half of the default value 160) to reduce the memory overhead of creating cuda graphs. Looking at the logs
+                    # from TP4 serving of qwen2-72b, a value of 80 is sufficient and can reduce the memory overhead of creating cuda graphs on lower-end GPUs compared to the original 160, avoiding OOM issues.
+                    if self.tp_size < 4:
+                        self.cuda_graph_max_bs = 16
+                    else:
+                        self.cuda_graph_max_bs = 80
+            elif gpu_mem < 60 * 1024:
+                # A100 (40GB), L40,
+                # (chunked_prefill_size 4k, cuda_graph_max_bs 32 if tp < 4 else 160)
+                if self.chunked_prefill_size is None:
+                    self.chunked_prefill_size = 4096
+                if self.cuda_graph_max_bs is None:
+                    if self.tp_size < 4:
+                        self.cuda_graph_max_bs = 32
+                    else:
+                        self.cuda_graph_max_bs = 160
+            elif gpu_mem < 90 * 1024:
+                # H100, A100
+                # (chunked_prefill_size 8k, cuda_graph_max_bs 256 if tp < 4 else 512)
+                if self.chunked_prefill_size is None:
+                    self.chunked_prefill_size = 8192
+                if self.cuda_graph_max_bs is None:
+                    if self.tp_size < 4:
+                        self.cuda_graph_max_bs = 256
+                    else:
+                        self.cuda_graph_max_bs = 512
+            elif gpu_mem < 160 * 1024:
+                # H20, H200
+                # (chunked_prefill_size 8k, cuda_graph_max_bs 256 if tp < 4 else 512)
+                if self.chunked_prefill_size is None:
+                    self.chunked_prefill_size = 8192
+                if self.cuda_graph_max_bs is None:
+                    if self.tp_size < 4:
+                        self.cuda_graph_max_bs = 256
+                    else:
+                        self.cuda_graph_max_bs = 512
+            else:
+                # B200, MI300
+                # (chunked_prefill_size 16k, cuda_graph_max_bs 512)
+                if self.chunked_prefill_size is None:
+                    self.chunked_prefill_size = 16384
+                if self.cuda_graph_max_bs is None:
+                    self.cuda_graph_max_bs = 512
+        else:
+            # Fallback defaults when gpu_mem is None
+            if self.chunked_prefill_size is None:
+                self.chunked_prefill_size = 4096
+            if self.cuda_graph_max_bs is None:
+                self.cuda_graph_max_bs = 160

-        # Set mem fraction static
-        if self.mem_fraction_static is None:
-            if gpu_mem is not None:
-                # GPU memory capacity = model weights + KV cache pool + activations + cuda graph buffers
-                # mem_fraction_static = (model weights + KV cache pool) / GPU memory capacity.
-
-                # We want mem_fraction_static to be as large as possible but still has enough room
-                # for activations and cuda graph buffers. We use the following heuristic to
-                # compute the needed size for activations and cuda graph buffers:
-                # - The size of the activation depends on the chunked_prefill_size and model size.
-                # - The size of cuda graph buffers depends on the cuda graph capture range and model size.
-                # For GPUs with more memory, we use a larger chunked_prefill_size and
-                # capture more cuda graphs, so they need to reserve more memory.
-                parallel_size = self.tp_size * self.pp_size
-
-                if gpu_mem < 20 * 1024:
-                    # T4, 4080. (chunked_prefill_size 2k, cuda_graph_max_bs 8)
-                    reserved_mem = (2.8 + parallel_size / 10) * 1024
-                elif gpu_mem < 35 * 1024:
-                    # A10, L40, 4090, 5090. (chunked_prefill_size 2k, cuda_graph_max_bs 8)
-                    reserved_mem = (2.8 + parallel_size / 10) * 1024
-                elif gpu_mem < 90 * 1024:
-                    # H100, A100. (chunked_prefill_size 8k, cuda_graph_max_bs 160)
-                    reserved_mem = (9.5 + parallel_size / 2) * 1024
-                elif gpu_mem < 100 * 1024:
-                    # H20. (chunked_prefill_size 8k, cuda_graph_max_bs 256)
-                    reserved_mem = (12 + parallel_size / 2) * 1024
-                elif gpu_mem < 160 * 1024:
-                    # H200. (chunked_prefill_size 8k, cuda_graph_max_bs 256)
-                    reserved_mem = (12 + parallel_size / 2) * 1024
-                else:
-                    # B200, MI300. (chunked_prefill_size 16k, cuda_graph_max_bs 512)
-                    reserved_mem = 32 * 1024
+        # Set cuda graph batch sizes
+        if self.cuda_graph_bs is None:
+            self.cuda_graph_bs = self._generate_cuda_graph_batch_sizes()
+        else:
+            self.cuda_graph_max_bs = max(self.cuda_graph_bs)

-                if self.speculative_algorithm is not None:
-                    # draft model and larger cuda graph buffers
+        if self.mem_fraction_static is None:
+            # Constant meta data (e.g., from attention backend)
+            reserved_mem = 512
+            # For activation during large prefill
+            if self.chunked_prefill_size > 0:
+                reserved_mem += max(self.chunked_prefill_size, 2048) * 1.5
+            else:
+                reserved_mem += max(self.max_prefill_tokens, 2048) * 1.5
+            # For cuda graphs
+            reserved_mem += self.cuda_graph_max_bs * 2
+            # Some adjustments for large parallel size
+            reserved_mem += self.tp_size * self.pp_size / 8 * 1024
+
+            if self.enable_dp_attention:
+                # DP attention needs more padding for some operations
+                reserved_mem += self.cuda_graph_max_bs * self.dp_size * 3
+
+                # DP attention uses much more memory for large cuda graph max bs,
+                # likely due to some inefficiencies in torch allocator or our implementation.
+                # So we need to reserve more memory.
+                if self.cuda_graph_max_bs > 300:
+                    reserved_mem += self.cuda_graph_max_bs * self.dp_size * 1.5
+
+            if gpu_mem is not None and gpu_mem > 60 * 1024:
+                reserved_mem = max(reserved_mem, 10 * 1024)
+
+            if self.speculative_algorithm is not None:
+                if self.speculative_algorithm == "STANDALONE":
+                    # standalone draft model and cuda graphs
+                    reserved_mem += 6 * 1024
+                elif self.speculative_algorithm != "NGRAM":
+                    # eagle draft models and cuda graphs
                     reserved_mem += 2 * 1024
-                    if self.enable_dp_attention:
-                        reserved_mem += 4 * 1024

-                self.mem_fraction_static = round((gpu_mem - reserved_mem) / gpu_mem, 3)
-            else:
-                self.mem_fraction_static = 0.88
+            self.mem_fraction_static = (
+                round((gpu_mem - reserved_mem) / gpu_mem, 3)
+                if gpu_mem is not None
+                else 0.88
+            )

         # Lazy init to avoid circular import
         # Multimodal models need more memory for the image processor
@@ -482,53 +682,192 @@ class ServerArgs:
         if model_config.is_multimodal:
             self.adjust_mem_fraction_for_vlm(model_config)

-        # Set chunked prefill size, which depends on the gpu memory capacity
-        if self.chunked_prefill_size is None:
-            if gpu_mem is not None:
-                if gpu_mem < 35 * 1024:  # A10, L40, 4090
-                    self.chunked_prefill_size = 2048
-                elif gpu_mem < 160 * 1024:  # H100, H200, A100, H20
-                    self.chunked_prefill_size = 8192
-                else:  # B200, MI300
-                    self.chunked_prefill_size = 16384
-            else:
-                self.chunked_prefill_size = 4096
+    def _generate_cuda_graph_batch_sizes(self):
+        """
+        Generate the list of batch sizes for CUDA graph capture based on cuda_graph_max_bs.
+        This integrates the logic from cuda_graph_runner.py.
+        """
+        # Handle disable_cuda_graph_padding as the first condition for both spec and non-spec
+        if self.disable_cuda_graph_padding:
+            capture_bs = list(range(1, self.cuda_graph_max_bs + 1))
+        elif self.speculative_algorithm is None:
+            # Normal case: [1, 2, 4, 8, 12] + list(range(16, 257, 8)) + list(range(272, 512, 16)) + list(range(512, cuda_graph_max_bs + 1))
+            capture_bs = (
+                [1, 2, 4, 8, 12]
+                + list(range(16, 257, 8))
+                + list(range(272, 512, 16))
+                + list(range(512, self.cuda_graph_max_bs + 1, 32))
+            )
+        else:
+            # Spec decoding case: list(range(1, 9, 1)) + list(range(10, 33, 2)) + list(range(40, 64, 4)) + list(range(72, 257, 8))
+            capture_bs = (
+                list(range(1, 9, 1))
+                + list(range(10, 33, 2))
+                + list(range(40, 64, 4))
+                + list(range(72, 257, 8))
+                + list(range(272, self.cuda_graph_max_bs + 1, 16))
+            )

-        # Set cuda graph max batch size
-        if self.cuda_graph_max_bs is None:
-            # Based on detailed statistics, when serving TP1/TP2 models on lower-end GPUs with HBM<25G, you can either disable cuda graph or set `cuda_graph_max_bs` to a very small value to reduce the memory overhead of creating cuda graphs, with almost no impact on performance. However, when serving models with TP4 or TP8, we need to enable cuda graph to maintain high performance. In this case, we can set `cuda_graph_max_bs` to 80 (half of the default value 160) to reduce the memory overhead of creating cuda graphs. Looking at the logs from TP4 serving of qwen2-72b, a value of 80 is sufficient and can reduce the memory overhead of creating cuda graphs on lower-end GPUs compared to the original 160, avoiding OOM issues.
-            if gpu_mem is not None and gpu_mem < 35 * 1024:
-                if self.tp_size < 4:
-                    self.cuda_graph_max_bs = 8
-                else:
-                    self.cuda_graph_max_bs = 80
+        capture_bs = [bs for bs in capture_bs if bs <= self.cuda_graph_max_bs]
+
+        return capture_bs

-        # Set kernel backends for hpu device
+    def _handle_hpu_backends(self):
         if self.device == "hpu":
             self.attention_backend = "torch_native"
             self.sampling_backend = "pytorch"

-        # Model-specific adjustments
-        self.model_specific_adjustments()
-
-        # Set kernel backends
+    def _handle_cpu_backends(self):
         if self.device == "cpu":
             if self.attention_backend is None:
                 self.attention_backend = "intel_amx"
             self.sampling_backend = "pytorch"

+    def _handle_model_specific_adjustments(self):
+        from sglang.srt.configs.model_config import is_deepseek_nsa
+
+        if parse_connector_type(self.model_path) == ConnectorType.INSTANCE:
+            return
+
+        hf_config = self.get_hf_config()
+        model_arch = hf_config.architectures[0]
+        if model_arch in ["GptOssForCausalLM"]:
+            if self.attention_backend is None:
+                if is_cuda() and is_sm100_supported():
+                    self.attention_backend = "trtllm_mha"
+                elif is_cuda() and is_sm90_supported():
+                    self.attention_backend = "fa3"
+                else:
+                    self.attention_backend = "triton"
+            supported_backends = ["triton", "trtllm_mha", "fa3"]
+            logger.info(
+                f"Use {self.attention_backend} as attention backend for GptOssForCausalLM"
+            )
+            assert (
+                self.attention_backend in supported_backends
+            ), f"GptOssForCausalLM requires one of {supported_backends} attention backend, but got '{self.attention_backend}'"
+
+            if is_sm100_supported():
+                if not self.enable_dp_attention:
+                    self.enable_flashinfer_allreduce_fusion = True
+                    logger.info(
+                        "Enable FlashInfer AllReduce Fusion on sm100 for GptOssForCausalLM"
+                    )
+            quantization_config = getattr(hf_config, "quantization_config", None)
+            is_mxfp4_quant_format = (
+                quantization_config is not None
+                and quantization_config.get("quant_method") == "mxfp4"
+            )
+
+            if is_sm100_supported() and is_mxfp4_quant_format:
+                self.moe_runner_backend = "flashinfer_mxfp4"
+                logger.warning(
+                    "Detected SM100 and MXFP4 quantization format for GPT-OSS model, enabling FlashInfer MXFP4 MOE kernel."
+                )
+            else:
+                if self.moe_runner_backend == "triton_kernel":
+                    assert (
+                        self.ep_size == 1
+                    ), "Triton kernel MoE is only supported when ep_size == 1"
+                if (
+                    self.moe_runner_backend == "auto"
+                    and self.ep_size == 1
+                    and is_triton_kernels_available()
+                ):
+                    self.moe_runner_backend = "triton_kernel"
+                    logger.warning(
+                        "Detected GPT-OSS model, enabling triton_kernels MOE kernel."
+                    )
+            self.disable_hybrid_swa_memory = True
+            if is_mxfp4_quant_format:
+                # use bf16 for mxfp4 triton kernels
+                self.dtype = "bfloat16"
+
+        elif "Llama4" in model_arch and self.device != "cpu":
+            assert self.attention_backend in {
+                "fa3",
+                "aiter",
+                "triton",
+            }, "fa3, aiter, or triton is required for Llama4 model"
+        elif model_arch in [
+            "Gemma2ForCausalLM",
+            "Gemma3ForCausalLM",
+            "Gemma3ForConditionalGeneration",
+            "Gemma3nForCausalLM",
+            "Gemma3nForConditionalGeneration",
+        ]:
+            # FIXME: https://github.com/sgl-project/sglang/pull/7367 is not compatible with gemma2 model.
+            # It failed at this test: https://github.com/sgl-project/sglang/actions/runs/16255155597/job/45890331952#step:4:736
+            logger.warning(
+                f"Disable hybrid SWA memory for {model_arch} as it is not yet supported."
+            )
+            self.disable_hybrid_swa_memory = True
+
+        if is_deepseek_nsa(hf_config):
+            if (
+                self.attention_backend is None
+                and self.prefill_attention_backend is None
+                and self.decode_attention_backend is None
+            ):
+                self.attention_backend = "nsa"
+                logger.warning("Set nsa attention backend for DeepSeek NSA.")
+
+            if not is_npu():
+                self.enable_dp_attention = True
+                self.dp_size = self.tp_size
+                logger.warning("DP attention is enabled for DeepSeek NSA.")
+
+            self.page_size = 64
+            logger.warning("Setting page size to 64 for DeepSeek NSA.")
+
+            self.mem_fraction_static = 0.8
+            logger.warning("Setting mem fraction static to 0.8 for DeepSeek NSA.")
+
+            # For Hopper, we support both bf16 and fp8 kv cache; for Blackwell, we support fp8 only currently
+            import torch
+
+            major, _ = torch.cuda.get_device_capability()
+            if major >= 10:
+                self.kv_cache_dtype = "fp8_e4m3"
+                logger.warning("Setting KV cache dtype to fp8.")
+
+            if self.kv_cache_dtype == "fp8_e4m3":
+                self.nsa_prefill = "flashmla_decode"
+                self.nsa_decode = "flashmla_decode"
+                logger.warning(
+                    "Setting NSA backend to flashmla_decode for FP8 KV Cache."
+                )
+
+            # Logging env vars for NSA
+            from sglang.srt.layers.attention.nsa.utils import (
+                print_nsa_bool_env_vars,
+            )
+
+            print_nsa_bool_env_vars()
+
+    def _handle_sampling_backend(self):
         if self.sampling_backend is None:
             self.sampling_backend = (
                 "flashinfer" if is_flashinfer_available() else "pytorch"
             )

+    def _handle_attention_backend_compatibility(self):
         if self.attention_backend == "torch_native":
             logger.warning(
                 "Cuda graph is disabled because of using torch native attention backend"
             )
             self.disable_cuda_graph = True

-        if self.attention_backend == "ascend":
+        if self.attention_backend == "flex_attention":
+            logger.warning(
+                "Cuda graph is disabled because of using torch Flex Attention backend"
+            )
+            self.disable_cuda_graph = True
+            assert (
+                self.speculative_algorithm is None
+            ), "Speculative decoding is currently not supported with Flex Attention backend"
+
+        if is_npu() and self.attention_backend in ["ascend"]:
             logger.warning(
                 "At this moment Ascend attention backend only supports a page_size of 128, change page_size to 128."
             )
@@ -590,30 +929,30 @@ class ServerArgs:
590
929
 
591
930
  if self.attention_backend == "dual_chunk_flash_attn":
592
931
  logger.warning(
593
- "Mixed chunk, radix cache, and cuda graphs are disabled because of using dual chunk flash attention backend"
932
+ "Mixed chunk and radix cache are disabled when using dual-chunk flash attention backend"
594
933
  )
595
934
  self.enable_mixed_chunk = False
596
- self.disable_cuda_graph = True
597
935
  self.disable_radix_cache = True
598
936
 
599
- # Set page size
937
+ def _handle_page_size(self):
600
938
  if self.page_size is None:
601
939
  self.page_size = 1
602
940
 
603
- # AMD-specific Triton attention KV splits default number
941
+ def _handle_amd_specifics(self):
604
942
  if is_hip():
605
943
  self.triton_attention_num_kv_splits = 16
606
944
 
607
- # Choose grammar backend
945
+ def _handle_grammar_backend(self):
608
946
  if self.grammar_backend is None:
609
947
  self.grammar_backend = "xgrammar"
610
948
 
611
- # Data parallelism attention
949
+ def _handle_data_parallelism(self):
950
+ if self.dp_size == 1:
951
+ self.enable_dp_attention = False
952
+ self.enable_dp_lm_head = False
953
+
612
954
  if self.enable_dp_attention:
613
955
  self.schedule_conservativeness = self.schedule_conservativeness * 0.3
614
- assert (
615
- self.dp_size > 1
616
- ), "Please set a dp-size > 1. You can use 1 < dp-size <= tp-size "
617
956
  assert self.tp_size % self.dp_size == 0
618
957
  self.chunked_prefill_size = self.chunked_prefill_size // self.dp_size
619
958
  logger.warning(
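For intuition, the DP-attention adjustments in _handle_data_parallelism boil down to simple arithmetic. A standalone sketch with made-up numbers (the helper name is invented; it is not the ServerArgs method itself):

def adjust_for_dp_attention(tp_size, dp_size, chunked_prefill_size, schedule_conservativeness):
    # Same arithmetic as above: each DP replica gets an equal share of the
    # chunked prefill budget, and scheduling becomes less conservative.
    assert tp_size % dp_size == 0, "tp_size must be divisible by dp_size"
    return chunked_prefill_size // dp_size, schedule_conservativeness * 0.3

# e.g. tp_size=8, dp_size=2, chunked_prefill_size=8192 -> (4096, 0.3)
print(adjust_for_dp_attention(8, 2, 8192, 1.0))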
@@ -625,7 +964,7 @@ class ServerArgs:
625
964
  self.enable_dp_attention
626
965
  ), "Please enable dp attention when setting enable_dp_lm_head. "
627
966
 
628
- # MoE kernel
967
+ def _handle_moe_kernel_config(self):
629
968
  if self.moe_runner_backend == "flashinfer_cutlass":
630
969
  assert (
631
970
  self.quantization == "modelopt_fp4"
@@ -636,13 +975,15 @@ class ServerArgs:
636
975
  ], "The expert parallel size must be 1 or the same as the tensor parallel size"
637
976
 
638
977
  if self.moe_runner_backend == "flashinfer_trtllm":
639
- if not self.disable_shared_experts_fusion:
640
- self.disable_shared_experts_fusion = True
641
- logger.warning(
642
- "FlashInfer TRTLLM MoE is enabled. --disable-shared-experts-fusion is automatically set."
643
- )
978
+ assert (
979
+ self.quantization == "modelopt_fp4" or self.quantization == "fp8"
980
+ ), "modelopt_fp4 or fp8 quantization is required for Flashinfer TRTLLM MoE"
981
+ self.disable_shared_experts_fusion = True
982
+ logger.warning(
983
+ "FlashInfer TRTLLM MoE is enabled. --disable-shared-experts-fusion is automatically set."
984
+ )
644
985
 
645
- # DeepEP MoE
986
+ def _handle_deepep_moe(self):
646
987
  if self.moe_a2a_backend == "deepep":
647
988
  if self.deepep_mode == "normal":
648
989
  logger.warning("Cuda graph is disabled because deepep_mode=`normal`")
@@ -652,6 +993,7 @@ class ServerArgs:
652
993
  f"DeepEP MoE is enabled. The expert parallel size is adjusted to be the same as the tensor parallel size[{self.tp_size}]."
653
994
  )
654
995
 
996
+ def _handle_eplb_and_dispatch(self):
655
997
  if self.enable_eplb and (self.expert_distribution_recorder_mode is None):
656
998
  self.expert_distribution_recorder_mode = "stat"
657
999
  logger.warning(
@@ -666,6 +1008,7 @@ class ServerArgs:
666
1008
  if self.enable_eplb:
667
1009
  assert self.ep_size > 1
668
1010
 
1011
+ def _handle_expert_distribution_metrics(self):
669
1012
  if self.enable_expert_distribution_metrics and (
670
1013
  self.expert_distribution_recorder_mode is None
671
1014
  ):
@@ -677,25 +1020,42 @@ class ServerArgs:
677
1020
  elif self.expert_distribution_recorder_mode is not None:
678
1021
  self.expert_distribution_recorder_buffer_size = 1000
679
1022
 
680
- # Pipeline parallelism
1023
+ def _handle_pipeline_parallelism(self):
681
1024
  if self.pp_size > 1:
682
1025
  self.disable_overlap_schedule = True
683
1026
  logger.warning(
684
1027
  "Pipeline parallelism is incompatible with overlap schedule."
685
1028
  )
686
1029
 
687
- # Hicache
1030
+ def _handle_hicache(self):
688
1031
  if self.hicache_storage_backend == "mooncake":
689
- # to use mooncake storage backend, the following conditions must be met:
690
- self.hicache_io_backend = "kernel"
691
- self.hicache_mem_layout = "page_first"
1032
+ if self.hicache_mem_layout == "layer_first":
1033
+ if self.hicache_io_backend == "direct":
1034
+ self.hicache_mem_layout = "page_first_direct"
1035
+ elif self.hicache_io_backend == "kernel":
1036
+ self.hicache_mem_layout = "page_first"
1037
+ logger.warning(
1038
+ f"Mooncake storage backend does not support layer_first layout, "
1039
+ f"switching to {self.hicache_mem_layout} layout for {self.hicache_io_backend} io backend"
1040
+ )
1041
+
1042
+ if self.hicache_mem_layout == "page_first_direct":
1043
+ if self.hicache_io_backend != "direct":
1044
+ self.hicache_io_backend = "direct"
1045
+ logger.warning(
1046
+ "Page first direct layout only support direct io backend"
1047
+ )
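The Mooncake layout fallback above amounts to a small decision table. A sketch under the same rules (the helper name is made up, not the actual method):

def mooncake_hicache_layout(mem_layout, io_backend):
    # layer_first is not supported by the Mooncake storage backend, so the
    # layout is rewritten based on the configured IO backend; page_first_direct
    # in turn forces the direct IO backend.
    if mem_layout == "layer_first":
        if io_backend == "direct":
            mem_layout = "page_first_direct"
        elif io_backend == "kernel":
            mem_layout = "page_first"
    if mem_layout == "page_first_direct":
        io_backend = "direct"
    return mem_layout, io_backend

assert mooncake_hicache_layout("layer_first", "direct") == ("page_first_direct", "direct")
assert mooncake_hicache_layout("layer_first", "kernel") == ("page_first", "kernel")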
692
1048
 
693
- # Speculative Decoding
1049
+ def _handle_speculative_decoding(self):
694
1050
  if self.speculative_algorithm == "NEXTN":
695
- # NEXTN shares the same implementation of EAGLE
696
1051
  self.speculative_algorithm = "EAGLE"
697
1052
 
698
- if self.speculative_algorithm in ("EAGLE", "EAGLE3"):
1053
+ if self.speculative_algorithm in ("EAGLE", "EAGLE3", "STANDALONE"):
1054
+ if self.speculative_algorithm == "STANDALONE" and self.enable_dp_attention:
1055
+ # TODO: support dp attention for standalone speculative decoding
1056
+ raise ValueError(
1057
+ "Currently standalone speculative decoding does not support dp attention."
1058
+ )
699
1059
  if self.max_running_requests is None:
700
1060
  self.max_running_requests = 48
701
1061
  self.disable_overlap_schedule = True
@@ -711,8 +1071,13 @@ class ServerArgs:
711
1071
  )
712
1072
 
713
1073
  model_arch = self.get_hf_config().architectures[0]
714
- if model_arch in ["DeepseekV3ForCausalLM", "Glm4MoeForCausalLM"]:
715
- # Auto set draft_model_path DeepSeek-V3/R1
1074
+ if model_arch in [
1075
+ "DeepseekV32ForCausalLM",
1076
+ "DeepseekV3ForCausalLM",
1077
+ "Glm4MoeForCausalLM",
1078
+ "BailingMoeForCausalLM",
1079
+ "BailingMoeV2ForCausalLM",
1080
+ ]:
716
1081
  if self.speculative_draft_model_path is None:
717
1082
  self.speculative_draft_model_path = self.model_path
718
1083
  else:
@@ -720,7 +1085,6 @@ class ServerArgs:
720
1085
  "DeepSeek MTP does not require setting speculative_draft_model_path."
721
1086
  )
722
1087
 
723
- # Auto choose parameters
724
1088
  if self.speculative_num_steps is None:
725
1089
  assert (
726
1090
  self.speculative_eagle_topk is None
@@ -760,23 +1124,63 @@ class ServerArgs:
760
1124
  "speculative_eagle_topk > 1 with page_size > 1 is unstable and produces incorrect results for paged attention backends. This combination is only supported for the 'flashinfer' backend."
761
1125
  )
762
1126
 
763
- # The token generated from the verify step is counted.
764
- # If sepculative_num_steps >= speculative_num_draft_tokens, the additional tokens will definitely be discarded.
765
- # assert self.speculative_num_steps < self.speculative_num_draft_tokens
1127
+ if self.speculative_algorithm == "NGRAM":
1128
+ if not self.device.startswith("cuda"):
1129
+ raise ValueError(
1130
+ "Ngram speculative decoding only supports CUDA device."
1131
+ )
1132
+ if self.max_running_requests is None:
1133
+ self.max_running_requests = 48
1134
+ self.disable_overlap_schedule = True
1135
+ self.enable_mixed_chunk = False
1136
+ self.speculative_eagle_topk = self.speculative_ngram_max_bfs_breadth
1137
+ if self.speculative_num_draft_tokens is None:
1138
+ self.speculative_num_draft_tokens = (
1139
+ self.speculative_ngram_max_match_window_size
1140
+ )
1141
+ logger.warning(
1142
+ "The overlap scheduler and mixed chunked prefill are disabled because of "
1143
+ "using ngram speculative decoding."
1144
+ )
1145
+
1146
+ if (
1147
+ self.speculative_eagle_topk > 1
1148
+ and self.page_size > 1
1149
+ and self.attention_backend != "flashinfer"
1150
+ ):
1151
+ raise ValueError(
1152
+ f"speculative_eagle_topk({self.speculative_eagle_topk}) > 1 "
1153
+ f"with page_size({self.page_size}) > 1 is unstable "
1154
+ "and produces incorrect results for paged attention backends. "
1155
+ "This combination is only supported for the 'flashinfer' backend."
1156
+ )
1157
+ if self.enable_dp_attention:
1158
+ # TODO: support dp attention for ngram speculative decoding
1159
+ raise ValueError(
1160
+ "Currently ngram speculative decoding does not support dp attention."
1161
+ )
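A sketch of how the NGRAM branch above derives its speculative parameters (the helper name and the example values are illustrative, not the real defaults):

def ngram_speculative_defaults(max_bfs_breadth, max_match_window_size, num_draft_tokens=None):
    # Mirrors the branch above: topk follows the maximum BFS breadth and the
    # draft-token budget defaults to the maximum match window size.
    eagle_topk = max_bfs_breadth
    if num_draft_tokens is None:
        num_draft_tokens = max_match_window_size
    return eagle_topk, num_draft_tokens

print(ngram_speculative_defaults(8, 12))  # -> (8, 12) with these illustrative inputs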
766
1162
 
767
- # GGUF
1163
+ def _handle_load_format(self):
768
1164
  if (
769
1165
  self.load_format == "auto" or self.load_format == "gguf"
770
1166
  ) and check_gguf_file(self.model_path):
771
1167
  self.quantization = self.load_format = "gguf"
772
1168
 
773
- # Model loading
774
1169
  if is_remote_url(self.model_path):
775
1170
  self.load_format = "remote"
1171
+
776
1172
  if self.custom_weight_loader is None:
777
1173
  self.custom_weight_loader = []
778
1174
 
779
- # PD disaggregation
1175
+ if self.load_format == "remote_instance":
1176
+ if (
1177
+ self.remote_instance_weight_loader_seed_instance_ip is None
1178
+ or self.remote_instance_weight_loader_seed_instance_service_port is None
1179
+ or self.remote_instance_weight_loader_send_weights_group_ports is None
1180
+ ):
1181
+ self.load_format = "auto"
1182
+
1183
+ def _handle_disaggregation(self):
780
1184
  if self.disaggregation_mode == "decode":
781
1185
  assert (
782
1186
  self.disaggregation_decode_tp is None
@@ -787,6 +1191,13 @@ class ServerArgs:
787
1191
 
788
1192
  self.disable_radix_cache = True
789
1193
  logger.warning("KV cache is forced as chunk cache for decode server")
1194
+
1195
+ if self.dp_size > 1 and not is_in_ci():
1196
+ assert self.prefill_round_robin_balance, (
1197
+ "Prefill round robin balance is required when dp size > 1. "
1198
+ "Please make sure that the prefill instance is launched with `--load-balance-method round_robin`"
1199
+ " and `--prefill-round-robin-balance` is set for decode server."
1200
+ )
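Restating the assertion above as launch arguments (the model path and dp size are placeholders; the flags are the ones named in the warning text):

# Prefill side: keep request placement round-robin so dp ranks line up.
prefill_argv = [
    "--model-path", "MODEL_PATH",
    "--disaggregation-mode", "prefill",
    "--load-balance-method", "round_robin",
]
# Decode side with dp_size > 1: declare that prefill is round-robin balanced.
decode_argv = [
    "--model-path", "MODEL_PATH",
    "--disaggregation-mode", "decode",
    "--dp-size", "2",
    "--prefill-round-robin-balance",
]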
790
1201
  elif self.disaggregation_mode == "prefill":
791
1202
  if self.disaggregation_decode_tp is None:
792
1203
  self.disaggregation_decode_tp = self.tp_size
@@ -795,25 +1206,84 @@ class ServerArgs:
795
1206
 
796
1207
  self.disaggregation_prefill_pp = self.pp_size
797
1208
  self.validate_disagg_tp_size(self.tp_size, self.disaggregation_decode_tp)
798
-
799
1209
  self.disable_cuda_graph = True
800
1210
  logger.warning("Cuda graph is disabled for prefill server")
801
1211
 
802
- # Propagate env vars
1212
+ def _handle_tokenizer_batching(self):
1213
+ if self.enable_tokenizer_batch_encode and self.enable_dynamic_batch_tokenizer:
1214
+ raise ValueError(
1215
+ "Cannot enable both --enable-tokenizer-batch-encode and --enable-dynamic-batch-tokenizer. "
1216
+ "Please choose one tokenizer batching approach."
1217
+ )
1218
+
1219
+ def _handle_environment_variables(self):
803
1220
  os.environ["SGLANG_ENABLE_TORCH_COMPILE"] = (
804
1221
  "1" if self.enable_torch_compile else "0"
805
1222
  )
806
- # Set env var before grammar backends init
1223
+ os.environ["SGLANG_MAMBA_SSM_DTYPE"] = self.mamba_ssm_dtype
807
1224
  os.environ["SGLANG_DISABLE_OUTLINES_DISK_CACHE"] = (
808
1225
  "1" if self.disable_outlines_disk_cache else "0"
809
1226
  )
1227
+ os.environ["SGLANG_ENABLE_DETERMINISTIC_INFERENCE"] = (
1228
+ "1" if self.enable_deterministic_inference else "0"
1229
+ )
810
1230
 
1231
+ def _handle_cache_compatibility(self):
811
1232
  if self.enable_hierarchical_cache and self.disable_radix_cache:
812
1233
  raise ValueError(
813
1234
  "The arguments enable-hierarchical-cache and disable-radix-cache are mutually exclusive "
814
1235
  "and cannot be used at the same time. Please use only one of them."
815
1236
  )
816
1237
 
1238
+ if (
1239
+ self.disaggregation_decode_enable_offload_kvcache
1240
+ and self.disaggregation_mode != "decode"
1241
+ ):
1242
+ raise ValueError(
1243
+ "The argument disaggregation-decode-enable-offload-kvcache is only supported for decode side."
1244
+ )
1245
+
1246
+ def _handle_metrics_labels(self):
1247
+ if (
1248
+ not self.tokenizer_metrics_custom_labels_header
1249
+ and self.tokenizer_metrics_allowed_custom_labels
1250
+ ):
1251
+ raise ValueError(
1252
+ "Please set --tokenizer-metrics-custom-labels-header when setting --tokenizer-metrics-allowed-custom-labels."
1253
+ )
1254
+
1255
+ def _handle_deterministic_inference(self):
1256
+ if self.enable_deterministic_inference:
1257
+ # Check sampling backend
1258
+ self.sampling_backend = "pytorch"
1259
+ logger.warning(
1260
+ "Sampling backend is set to pytorch for deterministic inference."
1261
+ )
1262
+
1263
+ # Check attention backend
1264
+ if self.attention_backend not in DETERMINISTIC_ATTENTION_BACKEND_CHOICES:
1265
+ raise ValueError(
1266
+ f"Currently only {DETERMINISTIC_ATTENTION_BACKEND_CHOICES} attention backends are supported for deterministic inference."
1267
+ )
1268
+
1269
+ # Currently, only FA3 supports radix cache. Support for other backends is in progress
1270
+ if self.attention_backend != "fa3":
1271
+ self.disable_radix_cache = True
1272
+ logger.warning(
1273
+ f"Currently radix cache is not compatible with {self.attention_backend} attention backend for deterministic inference. It will be supported in the future."
1274
+ )
1275
+
1276
+ # Check TP size
1277
+ if self.tp_size > 1:
1278
+ os.environ["NCCL_ALGO"] = "allreduce:tree"
1279
+ self.disable_custom_all_reduce = True
1280
+ logger.warning(
1281
+ "NCCL_ALGO is set to 'allreduce:tree' and custom all reduce is disabled for deterministic inference when TP size > 1."
1282
+ )
1283
+
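A condensed sketch of the deterministic-inference overrides applied above (a standalone illustration with an invented helper name, not the ServerArgs method):

import os

def deterministic_inference_overrides(attention_backend, tp_size):
    # Mirrors the rules above: pytorch sampling, radix cache only kept for fa3,
    # and tree-based NCCL all-reduce without the custom kernel once TP > 1.
    overrides = {
        "sampling_backend": "pytorch",
        "disable_radix_cache": attention_backend != "fa3",
        "disable_custom_all_reduce": tp_size > 1,
    }
    if tp_size > 1:
        os.environ["NCCL_ALGO"] = "allreduce:tree"
    return overrides

print(deterministic_inference_overrides("triton", tp_size=2))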
1284
+ def _handle_other_validations(self):
1285
+ pass
1286
+
817
1287
  @staticmethod
818
1288
  def add_cli_args(parser: argparse.ArgumentParser):
819
1289
  # Model and tokenizer
@@ -830,12 +1300,6 @@ class ServerArgs:
830
1300
  default=ServerArgs.tokenizer_path,
831
1301
  help="The path of the tokenizer.",
832
1302
  )
833
- parser.add_argument(
834
- "--tokenizer-worker-num",
835
- type=int,
836
- default=ServerArgs.tokenizer_worker_num,
837
- help="The worker num of the tokenizer manager.",
838
- )
839
1303
  parser.add_argument(
840
1304
  "--tokenizer-mode",
841
1305
  type=str,
@@ -845,6 +1309,12 @@ class ServerArgs:
845
1309
  "tokenizer if available, and 'slow' will "
846
1310
  "always use the slow tokenizer.",
847
1311
  )
1312
+ parser.add_argument(
1313
+ "--tokenizer-worker-num",
1314
+ type=int,
1315
+ default=ServerArgs.tokenizer_worker_num,
1316
+ help="The worker num of the tokenizer manager.",
1317
+ )
848
1318
  parser.add_argument(
849
1319
  "--skip-tokenizer-init",
850
1320
  action="store_true",
@@ -992,6 +1462,11 @@ class ServerArgs:
992
1462
  choices=["auto", "fp8_e5m2", "fp8_e4m3"],
993
1463
  help='Data type for kv cache storage. "auto" will use model data type. "fp8_e5m2" and "fp8_e4m3" is supported for CUDA 11.8+.',
994
1464
  )
1465
+ parser.add_argument(
1466
+ "--enable-fp32-lm-head",
1467
+ action="store_true",
1468
+ help="If set, the LM head outputs (logits) are in FP32.",
1469
+ )
995
1470
 
996
1471
  # Memory and scheduling
997
1472
  parser.add_argument(
@@ -1035,9 +1510,27 @@ class ServerArgs:
1035
1510
  "--schedule-policy",
1036
1511
  type=str,
1037
1512
  default=ServerArgs.schedule_policy,
1038
- choices=["lpm", "random", "fcfs", "dfs-weight", "lof"],
1513
+ choices=["lpm", "random", "fcfs", "dfs-weight", "lof", "priority"],
1039
1514
  help="The scheduling policy of the requests.",
1040
1515
  )
1516
+ parser.add_argument(
1517
+ "--enable-priority-scheduling",
1518
+ action="store_true",
1519
+ default=ServerArgs.enable_priority_scheduling,
1520
+ help="Enable priority scheduling. Requests with higher priority integer values will be scheduled first by default.",
1521
+ )
1522
+ parser.add_argument(
1523
+ "--schedule-low-priority-values-first",
1524
+ action="store_true",
1525
+ default=ServerArgs.schedule_low_priority_values_first,
1526
+ help="If specified with --enable-priority-scheduling, the scheduler will schedule requests with lower priority integer values first.",
1527
+ )
1528
+ parser.add_argument(
1529
+ "--priority-scheduling-preemption-threshold",
1530
+ type=int,
1531
+ default=ServerArgs.priority_scheduling_preemption_threshold,
1532
+ help="Minimum difference in priorities for an incoming request to have to preempt running request(s).",
1533
+ )
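A sketch of the intended semantics of these three flags (the actual policy lives in the scheduler; this only illustrates the threshold and the priority-direction option, and the helper name is made up):

def should_preempt(incoming_priority, running_priority, threshold, low_values_first=False):
    # An incoming request preempts a running one only if it is better by at
    # least `threshold`; "better" flips when low priority values come first.
    gap = (running_priority - incoming_priority) if low_values_first else (incoming_priority - running_priority)
    return gap >= threshold

print(should_preempt(incoming_priority=10, running_priority=3, threshold=5))  # True
print(should_preempt(incoming_priority=4, running_priority=3, threshold=5))   # False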
1041
1534
  parser.add_argument(
1042
1535
  "--schedule-conservativeness",
1043
1536
  type=float,
@@ -1209,6 +1702,21 @@ class ServerArgs:
1209
1702
  "to record request metrics separately. This is especially useful when dp_attention is enabled, as "
1210
1703
  "otherwise all metrics appear to come from TP 0.",
1211
1704
  )
1705
+ parser.add_argument(
1706
+ "--tokenizer-metrics-custom-labels-header",
1707
+ type=str,
1708
+ default=ServerArgs.tokenizer_metrics_custom_labels_header,
1709
+ help="Specify the HTTP header for passing custom labels for tokenizer metrics.",
1710
+ )
1711
+ parser.add_argument(
1712
+ "--tokenizer-metrics-allowed-custom-labels",
1713
+ type=str,
1714
+ nargs="+",
1715
+ default=ServerArgs.tokenizer_metrics_allowed_custom_labels,
1716
+ help="The custom labels allowed for tokenizer metrics. The labels are specified via a dict in "
1717
+ "'--tokenizer-metrics-custom-labels-header' field in HTTP requests, e.g., {'label1': 'value1', 'label2': "
1718
+ "'value2'} is allowed if '--tokenizer-metrics-allowed-custom-labels label1 label2' is set.",
1719
+ )
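A hypothetical request using these two flags (the header name, port, and JSON encoding of the label dict are assumptions; only labels in the allowed list are recorded):

# Assumes the server was started with:
#   --tokenizer-metrics-custom-labels-header x-sglang-labels
#   --tokenizer-metrics-allowed-custom-labels team stage
import json
import requests

resp = requests.post(
    "http://localhost:30000/generate",
    json={"text": "Hello", "sampling_params": {"max_new_tokens": 8}},
    headers={"x-sglang-labels": json.dumps({"team": "search", "stage": "prod"})},
)
print(resp.status_code)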
1212
1720
  parser.add_argument(
1213
1721
  "--bucket-time-to-first-token",
1214
1722
  type=float,
@@ -1239,8 +1747,8 @@ class ServerArgs:
1239
1747
  bucket_rule = (
1240
1748
  "Supports 3 rule types: 'default' uses predefined buckets; 'tse <middle> <base> <count>' "
1241
1749
  "generates two sides exponential distributed buckets (e.g., 'tse 1000 2 8' generates buckets "
1242
- "[984.0, 992.0, 996.0, 998.0, 1000.0, 1002.0, 1004.0, 1008.0, 1016.0]).); 'customer <value1> "
1243
- "<value2> ...' uses custom bucket values (e.g., 'customer 10 50 100 500')."
1750
+ "[984.0, 992.0, 996.0, 998.0, 1000.0, 1002.0, 1004.0, 1008.0, 1016.0]).); 'custom <value1> "
1751
+ "<value2> ...' uses custom bucket values (e.g., 'custom 10 50 100 500')."
1244
1752
  )
1245
1753
  parser.add_argument(
1246
1754
  "--prompt-tokens-buckets",
@@ -1280,6 +1788,17 @@ class ServerArgs:
1280
1788
  default=None,
1281
1789
  help="Config in json format for NVIDIA dynamo KV event publishing. Publishing will be enabled if this flag is used.",
1282
1790
  )
1791
+ parser.add_argument(
1792
+ "--enable-trace",
1793
+ action="store_true",
1794
+ help="Enable opentelemetry trace",
1795
+ )
1796
+ parser.add_argument(
1797
+ "--oltp-traces-endpoint",
1798
+ type=str,
1799
+ default="localhost:4317",
1800
+ help="Config opentelemetry collector endpoint if --enable-trace is set. format: <ip>:<port>",
1801
+ )
1283
1802
 
1284
1803
  # API related
1285
1804
  parser.add_argument(
@@ -1364,6 +1883,18 @@ class ServerArgs:
1364
1883
  "minimum_tokens",
1365
1884
  ],
1366
1885
  )
1886
+ parser.add_argument(
1887
+ "--load-watch-interval",
1888
+ type=float,
1889
+ default=ServerArgs.load_watch_interval,
1890
+ help="The interval of load watching in seconds.",
1891
+ )
1892
+ parser.add_argument(
1893
+ "--prefill-round-robin-balance",
1894
+ default=ServerArgs.prefill_round_robin_balance,
1895
+ action="store_true",
1896
+ help="Prefill is round robin balanced. This is used to promise decode server can get the correct dp rank.",
1897
+ )
1367
1898
 
1368
1899
  # Multi-node distributed serving
1369
1900
  parser.add_argument(
@@ -1438,9 +1969,17 @@ class ServerArgs:
1438
1969
  parser.add_argument(
1439
1970
  "--lora-backend",
1440
1971
  type=str,
1441
- default="triton",
1972
+ choices=LORA_BACKEND_CHOICES,
1973
+ default=ServerArgs.lora_backend,
1442
1974
  help="Choose the kernel backend for multi-LoRA serving.",
1443
1975
  )
1976
+ parser.add_argument(
1977
+ "--max-lora-chunk-size",
1978
+ type=int,
1979
+ default=ServerArgs.max_lora_chunk_size,
1980
+ choices=[16, 32, 64, 128],
1981
+ help="Maximum chunk size for the ChunkedSGMV LoRA backend. Only used when --lora-backend is 'csgmv'. Choosing a larger value might improve performance.",
1982
+ )
1444
1983
 
1445
1984
  # Kernel backend
1446
1985
  parser.add_argument(
@@ -1474,30 +2013,51 @@ class ServerArgs:
1474
2013
  parser.add_argument(
1475
2014
  "--grammar-backend",
1476
2015
  type=str,
1477
- choices=["xgrammar", "outlines", "llguidance", "none"],
2016
+ choices=GRAMMAR_BACKEND_CHOICES,
1478
2017
  default=ServerArgs.grammar_backend,
1479
2018
  help="Choose the backend for grammar-guided decoding.",
1480
2019
  )
1481
2020
  parser.add_argument(
1482
2021
  "--mm-attention-backend",
1483
2022
  type=str,
1484
- choices=["sdpa", "fa3", "triton_attn"],
2023
+ choices=["sdpa", "fa3", "triton_attn", "ascend_attn"],
1485
2024
  default=ServerArgs.mm_attention_backend,
1486
2025
  help="Set multimodal attention backend.",
1487
2026
  )
2027
+ parser.add_argument(
2028
+ "--nsa-prefill",
2029
+ default=ServerArgs.nsa_prefill,
2030
+ type=str,
2031
+ choices=NSA_CHOICES,
2032
+ )
2033
+ parser.add_argument(
2034
+ "--nsa-decode",
2035
+ default=ServerArgs.nsa_decode,
2036
+ type=str,
2037
+ choices=NSA_CHOICES,
2038
+ )
1488
2039
 
1489
2040
  # Speculative decoding
1490
2041
  parser.add_argument(
1491
2042
  "--speculative-algorithm",
1492
2043
  type=str,
1493
- choices=["EAGLE", "EAGLE3", "NEXTN"],
2044
+ choices=["EAGLE", "EAGLE3", "NEXTN", "STANDALONE", "NGRAM"],
1494
2045
  help="Speculative algorithm.",
1495
2046
  )
1496
2047
  parser.add_argument(
1497
2048
  "--speculative-draft-model-path",
2049
+ "--speculative-draft-model",
1498
2050
  type=str,
1499
2051
  help="The path of the draft model weights. This can be a local folder or a Hugging Face repo ID.",
1500
2052
  )
2053
+ parser.add_argument(
2054
+ "--speculative-draft-model-revision",
2055
+ type=str,
2056
+ default=None,
2057
+ help="The specific draft model version to use. It can be a branch "
2058
+ "name, a tag name, or a commit id. If unspecified, will use "
2059
+ "the default version.",
2060
+ )
1501
2061
  parser.add_argument(
1502
2062
  "--speculative-num-steps",
1503
2063
  type=int,
@@ -1534,6 +2094,57 @@ class ServerArgs:
1534
2094
  help="The path of the draft model's small vocab table.",
1535
2095
  default=ServerArgs.speculative_token_map,
1536
2096
  )
2097
+ parser.add_argument(
2098
+ "--speculative-attention-mode",
2099
+ type=str,
2100
+ choices=["prefill", "decode"],
2101
+ help="Attention backend for speculative decoding operations (both target verify and draft extend). Can be one of 'prefill' (default) or 'decode'.",
2102
+ default=ServerArgs.speculative_attention_mode,
2103
+ )
2104
+ # Ngram speculative decoding
2105
+ parser.add_argument(
2106
+ "--speculative-ngram-min-match-window-size",
2107
+ type=int,
2108
+ default=ServerArgs.speculative_ngram_min_match_window_size,
2109
+ help="The minimum window size for pattern matching in ngram speculative decoding.",
2110
+ )
2111
+ parser.add_argument(
2112
+ "--speculative-ngram-max-match-window-size",
2113
+ type=int,
2114
+ default=ServerArgs.speculative_ngram_max_match_window_size,
2115
+ help="The maximum window size for pattern matching in ngram speculative decoding.",
2116
+ )
2117
+ parser.add_argument(
2118
+ "--speculative-ngram-min-bfs-breadth",
2119
+ type=int,
2120
+ default=ServerArgs.speculative_ngram_min_bfs_breadth,
2121
+ help="The minimum breadth for BFS (Breadth-First Search) in ngram speculative decoding.",
2122
+ )
2123
+ parser.add_argument(
2124
+ "--speculative-ngram-max-bfs-breadth",
2125
+ type=int,
2126
+ default=ServerArgs.speculative_ngram_max_bfs_breadth,
2127
+ help="The maximum breadth for BFS (Breadth-First Search) in ngram speculative decoding.",
2128
+ )
2129
+ parser.add_argument(
2130
+ "--speculative-ngram-match-type",
2131
+ type=str,
2132
+ choices=["BFS", "PROB"],
2133
+ default=ServerArgs.speculative_ngram_match_type,
2134
+ help="The match type for cache tree.",
2135
+ )
2136
+ parser.add_argument(
2137
+ "--speculative-ngram-branch-length",
2138
+ type=int,
2139
+ default=ServerArgs.speculative_ngram_branch_length,
2140
+ help="The branch length for ngram speculative decoding.",
2141
+ )
2142
+ parser.add_argument(
2143
+ "--speculative-ngram-capacity",
2144
+ type=int,
2145
+ default=ServerArgs.speculative_ngram_capacity,
2146
+ help="The cache capacity for ngram speculative decoding.",
2147
+ )
1537
2148
 
1538
2149
  # Expert parallelism
1539
2150
  parser.add_argument(
@@ -1561,6 +2172,7 @@ class ServerArgs:
1561
2172
  "flashinfer_trtllm",
1562
2173
  "flashinfer_cutlass",
1563
2174
  "flashinfer_mxfp4",
2175
+ "flashinfer_cutedsl",
1564
2176
  ],
1565
2177
  default=ServerArgs.moe_runner_backend,
1566
2178
  help="Choose the runner backend for MoE.",
@@ -1568,7 +2180,7 @@ class ServerArgs:
1568
2180
  parser.add_argument(
1569
2181
  "--flashinfer-mxfp4-moe-precision",
1570
2182
  type=str,
1571
- choices=["mxfp4", "bf16"],
2183
+ choices=["default", "bf16"],
1572
2184
  default=ServerArgs.flashinfer_mxfp4_moe_precision,
1573
2185
  help="Choose the computation precision of flashinfer mxfp4 moe",
1574
2186
  )
@@ -1661,6 +2273,21 @@ class ServerArgs:
1661
2273
  help="TP size for MoE dense MLP layers. This flag is useful when, with large TP size, there are errors caused by weights in MLP layers having dimension smaller than the min dimension GEMM supports.",
1662
2274
  )
1663
2275
 
2276
+ # Mamba Cache
2277
+ parser.add_argument(
2278
+ "--max-mamba-cache-size",
2279
+ type=int,
2280
+ default=ServerArgs.max_mamba_cache_size,
2281
+ help="The maximum size of the mamba cache.",
2282
+ )
2283
+ parser.add_argument(
2284
+ "--mamba-ssm-dtype",
2285
+ type=str,
2286
+ default=ServerArgs.mamba_ssm_dtype,
2287
+ choices=["float32", "bfloat16"],
2288
+ help="The data type of the SSM states in mamba cache.",
2289
+ )
2290
+
1664
2291
  # Hierarchical cache
1665
2292
  parser.add_argument(
1666
2293
  "--enable-hierarchical-cache",
@@ -1686,6 +2313,13 @@ class ServerArgs:
1686
2313
  default=ServerArgs.hicache_write_policy,
1687
2314
  help="The write policy of hierarchical cache.",
1688
2315
  )
2316
+ parser.add_argument(
2317
+ "--radix-eviction-policy",
2318
+ type=str,
2319
+ choices=RADIX_EVICTION_POLICY_CHOICES,
2320
+ default=ServerArgs.radix_eviction_policy,
2321
+ help="The eviction policy of radix trees. 'lru' stands for Least Recently Used, 'lfu' stands for Least Frequently Used.",
2322
+ )
1689
2323
  parser.add_argument(
1690
2324
  "--hicache-io-backend",
1691
2325
  type=str,
@@ -1696,16 +2330,19 @@ class ServerArgs:
1696
2330
  parser.add_argument(
1697
2331
  "--hicache-mem-layout",
1698
2332
  type=str,
1699
- choices=["layer_first", "page_first"],
2333
+ choices=["layer_first", "page_first", "page_first_direct"],
1700
2334
  default=ServerArgs.hicache_mem_layout,
1701
2335
  help="The layout of host memory pool for hierarchical cache.",
1702
2336
  )
1703
2337
  parser.add_argument(
1704
2338
  "--hicache-storage-backend",
1705
2339
  type=str,
1706
- choices=["file", "mooncake", "hf3fs", "nixl"],
2340
+ choices=["file", "mooncake", "hf3fs", "nixl", "aibrix", "dynamic", "eic"],
1707
2341
  default=ServerArgs.hicache_storage_backend,
1708
- help="The storage backend for hierarchical KV cache.",
2342
+ help="The storage backend for hierarchical KV cache. "
2343
+ "Built-in backends: file, mooncake, hf3fs, nixl, aibrix. "
2344
+ "For dynamic backend, use --hicache-storage-backend-extra-config to specify: "
2345
+ "backend_name (custom name), module_path (Python module path), class_name (backend class name).",
1709
2346
  )
1710
2347
  parser.add_argument(
1711
2348
  "--hicache-storage-prefetch-policy",
@@ -1720,6 +2357,12 @@ class ServerArgs:
1720
2357
  default=ServerArgs.hicache_storage_backend_extra_config,
1721
2358
  help="A dictionary in JSON string format containing extra configuration for the storage backend.",
1722
2359
  )
2360
+ # LMCache
2361
+ parser.add_argument(
2362
+ "--enable-lmcache",
2363
+ action="store_true",
2364
+ help="Using LMCache as an alternative hierarchical cache solution",
2365
+ )
1723
2366
 
1724
2367
  # Double Sparsity
1725
2368
  parser.add_argument(
@@ -1863,6 +2506,11 @@ class ServerArgs:
1863
2506
  action="store_true",
1864
2507
  help="Enable using mscclpp for small messages for all-reduce kernel and fall back to NCCL.",
1865
2508
  )
2509
+ parser.add_argument(
2510
+ "--enable-torch-symm-mem",
2511
+ action="store_true",
2512
+ help="Enable using torch symm mem for all-reduce kernel and fall back to NCCL. Only supports CUDA device SM90 and above. SM90 supports world size 4, 6, 8. SM10 supports world size 6, 8.",
2513
+ )
1866
2514
  parser.add_argument(
1867
2515
  "--disable-overlap-schedule",
1868
2516
  action="store_true",
@@ -1888,6 +2536,11 @@ class ServerArgs:
1888
2536
  action="store_true",
1889
2537
  help="Enabling two micro batches to overlap.",
1890
2538
  )
2539
+ parser.add_argument(
2540
+ "--enable-single-batch-overlap",
2541
+ action="store_true",
2542
+ help="Let computation and communication overlap within one micro batch.",
2543
+ )
1891
2544
  parser.add_argument(
1892
2545
  "--tbo-token-distribution-threshold",
1893
2546
  type=float,
@@ -1933,6 +2586,12 @@ class ServerArgs:
1933
2586
  default=ServerArgs.triton_attention_num_kv_splits,
1934
2587
  help="The number of KV splits in flash decoding Triton kernel. Larger value is better in longer context scenarios. The default value is 8.",
1935
2588
  )
2589
+ parser.add_argument(
2590
+ "--triton-attention-split-tile-size",
2591
+ type=int,
2592
+ default=ServerArgs.triton_attention_split_tile_size,
2593
+ help="The size of split KV tile in flash decoding Triton kernel. Used for deterministic inference.",
2594
+ )
1936
2595
  parser.add_argument(
1937
2596
  "--num-continuous-decode-steps",
1938
2597
  type=int,
@@ -1951,6 +2610,11 @@ class ServerArgs:
1951
2610
  action="store_true",
1952
2611
  help="Allow saving memory using release_memory_occupation and resume_memory_occupation",
1953
2612
  )
2613
+ parser.add_argument(
2614
+ "--enable-weights-cpu-backup",
2615
+ action="store_true",
2616
+ help="Save model weights to CPU memory during release_weights_occupation and resume_weights_occupation",
2617
+ )
1954
2618
  parser.add_argument(
1955
2619
  "--allow-auto-truncate",
1956
2620
  action="store_true",
@@ -1981,6 +2645,11 @@ class ServerArgs:
1981
2645
  action="store_true",
1982
2646
  help="Adopt base image processor instead of fast image processor.",
1983
2647
  )
2648
+ parser.add_argument(
2649
+ "--keep-mm-feature-on-device",
2650
+ action="store_true",
2651
+ help="Keep multimodal feature tensors on device after processing to save D2H copy.",
2652
+ )
1984
2653
  parser.add_argument(
1985
2654
  "--enable-return-hidden-states",
1986
2655
  action="store_true",
@@ -1992,6 +2661,12 @@ class ServerArgs:
1992
2661
  default=ServerArgs.scheduler_recv_interval,
1993
2662
  help="The interval to poll requests in scheduler. Can be set to >1 to reduce the overhead of this.",
1994
2663
  )
2664
+ parser.add_argument(
2665
+ "--numa-node",
2666
+ type=int,
2667
+ nargs="+",
2668
+ help="Sets the numa node for the subprocesses. i-th element corresponds to i-th subprocess.",
2669
+ )
1995
2670
 
1996
2671
  # Debug tensor dumps
1997
2672
  parser.add_argument(
@@ -2017,12 +2692,29 @@ class ServerArgs:
2017
2692
  action="store_true",
2018
2693
  help="Only dump the tensors for prefill requests (i.e. batch size > 1).",
2019
2694
  )
2695
+ parser.add_argument(
2696
+ "--enable-dynamic-batch-tokenizer",
2697
+ action="store_true",
2698
+ help="Enable async dynamic batch tokenizer for improved performance when multiple requests arrive concurrently.",
2699
+ )
2700
+ parser.add_argument(
2701
+ "--dynamic-batch-tokenizer-batch-size",
2702
+ type=int,
2703
+ default=ServerArgs.dynamic_batch_tokenizer_batch_size,
2704
+ help="[Only used if --enable-dynamic-batch-tokenizer is set] Maximum batch size for dynamic batch tokenizer.",
2705
+ )
2706
+ parser.add_argument(
2707
+ "--dynamic-batch-tokenizer-batch-timeout",
2708
+ type=float,
2709
+ default=ServerArgs.dynamic_batch_tokenizer_batch_timeout,
2710
+ help="[Only used if --enable-dynamic-batch-tokenizer is set] Timeout in seconds for batching tokenization requests.",
2711
+ )
2020
2712
 
2021
2713
  # PD disaggregation
2022
2714
  parser.add_argument(
2023
2715
  "--disaggregation-mode",
2024
2716
  type=str,
2025
- default="null",
2717
+ default=ServerArgs.disaggregation_mode,
2026
2718
  choices=["null", "prefill", "decode"],
2027
2719
  help='Only used for PD disaggregation. "prefill" for prefill-only server, and "decode" for decode-only server. If not specified, it is not PD disaggregated',
2028
2720
  )
@@ -2065,6 +2757,11 @@ class ServerArgs:
2065
2757
  "or multiple comma-separated devices (e.g., --disaggregation-ib-device mlx5_0,mlx5_1). "
2066
2758
  "Default is None, which triggers automatic device detection when mooncake backend is enabled.",
2067
2759
  )
2760
+ parser.add_argument(
2761
+ "--disaggregation-decode-enable-offload-kvcache",
2762
+ action="store_true",
2763
+ help="Enable async KV cache offloading on decode server (PD mode).",
2764
+ )
2068
2765
  parser.add_argument(
2069
2766
  "--num-reserved-decode-tokens",
2070
2767
  type=int,
@@ -2072,10 +2769,10 @@ class ServerArgs:
2072
2769
  help="Number of decode tokens that will have memory reserved when adding new request to the running batch.",
2073
2770
  )
2074
2771
  parser.add_argument(
2075
- "--pdlb-url",
2076
- type=str,
2077
- default=None,
2078
- help="The URL of the PD disaggregation load balancer. If set, the prefill/decode server will register with the load balancer.",
2772
+ "--disaggregation-decode-polling-interval",
2773
+ type=int,
2774
+ default=ServerArgs.disaggregation_decode_polling_interval,
2775
+ help="The interval to poll requests in decode server. Can be set to >1 to reduce the overhead of this.",
2079
2776
  )
2080
2777
 
2081
2778
  # Custom weight loader
@@ -2091,6 +2788,24 @@ class ServerArgs:
2091
2788
  action="store_true",
2092
2789
  help="Disable mmap while loading weight using safetensors.",
2093
2790
  )
2791
+ parser.add_argument(
2792
+ "--remote-instance-weight-loader-seed-instance-ip",
2793
+ type=str,
2794
+ default=ServerArgs.remote_instance_weight_loader_seed_instance_ip,
2795
+ help="The ip of the seed instance for loading weights from remote instance.",
2796
+ )
2797
+ parser.add_argument(
2798
+ "--remote-instance-weight-loader-seed-instance-service-port",
2799
+ type=int,
2800
+ default=ServerArgs.remote_instance_weight_loader_seed_instance_service_port,
2801
+ help="The service port of the seed instance for loading weights from remote instance.",
2802
+ )
2803
+ parser.add_argument(
2804
+ "--remote-instance-weight-loader-send-weights-group-ports",
2805
+ type=json_list_type,
2806
+ default=ServerArgs.remote_instance_weight_loader_send_weights_group_ports,
2807
+ help="The communication group ports for loading weights from remote instance.",
2808
+ )
2094
2809
 
2095
2810
  # For PD-Multiplexing
2096
2811
  parser.add_argument(
@@ -2106,36 +2821,55 @@ class ServerArgs:
2106
2821
  help="Number of sm partition groups.",
2107
2822
  )
2108
2823
 
2824
+ # For deterministic inference
2825
+ parser.add_argument(
2826
+ "--enable-deterministic-inference",
2827
+ action="store_true",
2828
+ help="Enable deterministic inference mode with batch invariant ops.",
2829
+ )
2830
+
2109
2831
  # Deprecated arguments
2110
2832
  parser.add_argument(
2111
2833
  "--enable-ep-moe",
2112
- action="store_true",
2113
- help="(Deprecated) Enabling expert parallelism for moe. The ep size is equal to the tp size.",
2834
+ action=DeprecatedAction,
2835
+ help="NOTE: --enable-ep-moe is deprecated. Please set `--ep-size` to the same value as `--tp-size` instead.",
2114
2836
  )
2115
2837
  parser.add_argument(
2116
2838
  "--enable-deepep-moe",
2117
- action="store_true",
2118
- help="(Deprecated) Enabling DeepEP MoE implementation for EP MoE.",
2839
+ action=DeprecatedAction,
2840
+ help="NOTE: --enable-deepep-moe is deprecated. Please set `--moe-a2a-backend` to 'deepep' instead.",
2119
2841
  )
2120
2842
  parser.add_argument(
2121
2843
  "--enable-flashinfer-cutlass-moe",
2122
- action="store_true",
2123
- help="(Deprecated) Enable FlashInfer CUTLASS MoE backend for modelopt_fp4 quant on Blackwell. Supports MoE-EP",
2844
+ action=DeprecatedAction,
2845
+ help="NOTE: --enable-flashinfer-cutlass-moe is deprecated. Please set `--moe-runner-backend` to 'flashinfer_cutlass' instead.",
2846
+ )
2847
+ parser.add_argument(
2848
+ "--enable-flashinfer-cutedsl-moe",
2849
+ action=DeprecatedAction,
2850
+ help="NOTE: --enable-flashinfer-cutedsl-moe is deprecated. Please set `--moe-runner-backend` to 'flashinfer_cutedsl' instead.",
2124
2851
  )
2125
2852
  parser.add_argument(
2126
2853
  "--enable-flashinfer-trtllm-moe",
2127
- action="store_true",
2128
- help="(Deprecated) Enable FlashInfer TRTLLM MoE backend on Blackwell. Supports BlockScale FP8 MoE-EP",
2854
+ action=DeprecatedAction,
2855
+ help="NOTE: --enable-flashinfer-trtllm-moe is deprecated. Please set `--moe-runner-backend` to 'flashinfer_trtllm' instead.",
2129
2856
  )
2130
2857
  parser.add_argument(
2131
2858
  "--enable-triton-kernel-moe",
2132
- action="store_true",
2133
- help="(Deprecated) Use triton moe grouped gemm kernel.",
2859
+ action=DeprecatedAction,
2860
+ help="NOTE: --enable-triton-kernel-moe is deprecated. Please set `--moe-runner-backend` to 'triton_kernel' instead.",
2134
2861
  )
2135
2862
  parser.add_argument(
2136
2863
  "--enable-flashinfer-mxfp4-moe",
2137
- action="store_true",
2138
- help="(Deprecated) Enable FlashInfer MXFP4 MoE backend for modelopt_fp4 quant on Blackwell.",
2864
+ action=DeprecatedAction,
2865
+ help="NOTE: --enable-flashinfer-mxfp4-moe is deprecated. Please set `--moe-runner-backend` to 'flashinfer_mxfp4' instead.",
2866
+ )
2867
+
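For reference, the replacements named in the deprecation messages above, collected in one place (an illustrative mapping, not code from the package):

# Deprecated flag -> replacement arguments, as stated in the help strings.
# --enable-ep-moe has no direct replacement flag: set --ep-size equal to --tp-size.
DEPRECATED_MOE_FLAGS = {
    "--enable-deepep-moe": ["--moe-a2a-backend", "deepep"],
    "--enable-flashinfer-cutlass-moe": ["--moe-runner-backend", "flashinfer_cutlass"],
    "--enable-flashinfer-cutedsl-moe": ["--moe-runner-backend", "flashinfer_cutedsl"],
    "--enable-flashinfer-trtllm-moe": ["--moe-runner-backend", "flashinfer_trtllm"],
    "--enable-triton-kernel-moe": ["--moe-runner-backend", "triton_kernel"],
    "--enable-flashinfer-mxfp4-moe": ["--moe-runner-backend", "flashinfer_mxfp4"],
}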
2868
+ # Configuration file support
2869
+ parser.add_argument(
2870
+ "--config",
2871
+ type=str,
2872
+ help="Read CLI options from a config file. Must be a YAML file with configuration options.",
2139
2873
  )
2140
2874
 
2141
2875
  @classmethod
@@ -2144,6 +2878,7 @@ class ServerArgs:
2144
2878
  args.pp_size = args.pipeline_parallel_size
2145
2879
  args.dp_size = args.data_parallel_size
2146
2880
  args.ep_size = args.expert_parallel_size
2881
+
2147
2882
  attrs = [attr.name for attr in dataclasses.fields(cls)]
2148
2883
  return cls(**{attr: getattr(args, attr) for attr in attrs})
2149
2884
 
@@ -2200,7 +2935,8 @@ class ServerArgs:
2200
2935
 
2201
2936
  # Check chunked prefill
2202
2937
  # Skip validation if chunked prefill is disabled (i.e., size <= 0).
2203
- if self.chunked_prefill_size > 0:
2938
+ # Skip validation if disaggregation mode is decode.
2939
+ if self.chunked_prefill_size > 0 and self.disaggregation_mode != "decode":
2204
2940
  assert (
2205
2941
  self.chunked_prefill_size % self.page_size == 0
2206
2942
  ), "chunked_prefill_size must be divisible by page_size"
@@ -2214,6 +2950,13 @@ class ServerArgs:
2214
2950
  "--generation-tokens-buckets", self.generation_tokens_buckets
2215
2951
  )
2216
2952
 
2953
+ # Check scheduling policy
2954
+ if self.enable_priority_scheduling:
2955
+ assert self.schedule_policy in [
2956
+ "fcfs",
2957
+ "lof",
2958
+ ], f"To use priority scheduling, schedule_policy must be 'fcfs' or 'lof'. '{self.schedule_policy}' is not supported."
2959
+
2217
2960
  def check_lora_server_args(self):
2218
2961
  assert self.max_loras_per_batch > 0, "max_loras_per_batch must be positive"
2219
2962
 
@@ -2297,6 +3040,12 @@ class ServerArgs:
2297
3040
  f"max_loaded_loras={self.max_loaded_loras}, lora_paths={len(self.lora_paths)}"
2298
3041
  )
2299
3042
 
3043
+ if self.max_lora_chunk_size is not None:
3044
+ assert (
3045
+ 16 <= self.max_lora_chunk_size <= 128
3046
+ and (self.max_lora_chunk_size & (self.max_lora_chunk_size - 1)) == 0
3047
+ ), "--max-lora-chunk-size must be a power of 2 between 16 and 128."
3048
+
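The assertion above relies on the usual bit trick for power-of-two checks; a tiny standalone illustration (the helper name is invented):

def is_valid_lora_chunk_size(x):
    # x & (x - 1) == 0 holds exactly for powers of two (for x > 0).
    return 16 <= x <= 128 and (x & (x - 1)) == 0

assert [x for x in range(16, 129) if is_valid_lora_chunk_size(x)] == [16, 32, 64, 128]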
2300
3049
  def validate_disagg_tp_size(self, prefill_tp: int, decode_tp: int):
2301
3050
  larger_tp = max(decode_tp, prefill_tp)
2302
3051
  smaller_tp = min(decode_tp, prefill_tp)
@@ -2314,8 +3063,8 @@ class ServerArgs:
2314
3063
  assert rule in [
2315
3064
  "tse",
2316
3065
  "default",
2317
- "customer",
2318
- ], f"Unsupported {arg_name} rule type: '{rule}'. Must be one of: 'tse', 'default', 'customer'"
3066
+ "custom",
3067
+ ], f"Unsupported {arg_name} rule type: '{rule}'. Must be one of: 'tse', 'default', 'custom'"
2319
3068
 
2320
3069
  if rule == "tse":
2321
3070
  assert (
@@ -2338,95 +3087,20 @@ class ServerArgs:
2338
3087
  len(buckets_rule) == 1
2339
3088
  ), f"{arg_name} default rule should only have one parameter: ['default'], got {len(buckets_rule)}"
2340
3089
 
2341
- elif rule == "customer":
3090
+ elif rule == "custom":
2342
3091
  assert (
2343
3092
  len(buckets_rule) >= 2
2344
- ), f"{arg_name} customer rule requires at least one bucket value: ['customer', value1, ...]"
3093
+ ), f"{arg_name} custom rule requires at least one bucket value: ['custom', value1, ...]"
2345
3094
  try:
2346
3095
  bucket_values = [float(x) for x in buckets_rule[1:]]
2347
3096
  except ValueError:
2348
- assert False, f"{arg_name} customer rule bucket values must be numeric"
3097
+ assert False, f"{arg_name} custom rule bucket values must be numeric"
2349
3098
  assert len(set(bucket_values)) == len(
2350
3099
  bucket_values
2351
- ), f"{arg_name} customer rule bucket values should not contain duplicates"
3100
+ ), f"{arg_name} custom rule bucket values should not contain duplicates"
2352
3101
  assert all(
2353
3102
  val >= 0 for val in bucket_values
2354
- ), f"{arg_name} customer rule bucket values should be non-negative"
2355
-
2356
- def model_specific_adjustments(self):
2357
- hf_config = self.get_hf_config()
2358
- model_arch = hf_config.architectures[0]
2359
- if model_arch in ["GptOssForCausalLM"]:
2360
- if self.attention_backend is None:
2361
- if is_cuda() and is_sm100_supported():
2362
- self.attention_backend = "trtllm_mha"
2363
- elif is_cuda() and is_sm90_supported():
2364
- self.attention_backend = "fa3"
2365
- else:
2366
- self.attention_backend = "triton"
2367
- supported_backends = ["triton", "trtllm_mha", "fa3"]
2368
- logger.info(
2369
- f"Use {self.attention_backend} as attention backend for GptOssForCausalLM"
2370
- )
2371
- assert (
2372
- self.attention_backend in supported_backends
2373
- ), f"GptOssForCausalLM requires one of {supported_backends} attention backend, but got '{self.attention_backend}'"
2374
-
2375
- if is_sm100_supported():
2376
- if not self.enable_dp_attention:
2377
- self.enable_flashinfer_allreduce_fusion = True
2378
- logger.info(
2379
- "Enable FlashInfer AllReduce Fusion on sm100 for GptOssForCausalLM"
2380
- )
2381
- quantization_config = getattr(hf_config, "quantization_config", None)
2382
- is_mxfp4_quant_format = (
2383
- quantization_config is not None
2384
- and quantization_config.get("quant_method") == "mxfp4"
2385
- )
2386
-
2387
- if is_sm100_supported() and is_mxfp4_quant_format:
2388
- self.moe_runner_backend = "flashinfer_mxfp4"
2389
- logger.warning(
2390
- "Detected SM100 and MXFP4 quantization format for GPT-OSS model, enabling FlashInfer MXFP4 MOE kernel."
2391
- )
2392
- else:
2393
- if self.moe_runner_backend == "triton_kernel":
2394
- assert (
2395
- self.ep_size == 1
2396
- ), "Triton kernel MoE is only supported when ep_size == 1"
2397
- if (
2398
- self.moe_runner_backend == "auto"
2399
- and self.ep_size == 1
2400
- and is_triton_kernels_available()
2401
- ):
2402
- self.moe_runner_backend = "triton_kernel"
2403
- logger.warning(
2404
- "Detected GPT-OSS model, enabling triton_kernels MOE kernel."
2405
- )
2406
- self.disable_hybrid_swa_memory = True
2407
- if is_mxfp4_quant_format:
2408
- # use bf16 for mxfp4 triton kernels
2409
- self.dtype = "bfloat16"
2410
-
2411
- elif "Llama4" in model_arch:
2412
- assert self.attention_backend in {
2413
- "fa3",
2414
- "aiter",
2415
- "triton",
2416
- }, "fa3, aiter, or triton is required for Llama4 model"
2417
- elif model_arch in [
2418
- "Gemma2ForCausalLM",
2419
- "Gemma3ForCausalLM",
2420
- "Gemma3ForConditionalGeneration",
2421
- "Gemma3nForCausalLM",
2422
- "Gemma3nForConditionalGeneration",
2423
- ]:
2424
- # FIXME: https://github.com/sgl-project/sglang/pull/7367 is not compatible with gemma2 model.
2425
- # It failed at this test: https://github.com/sgl-project/sglang/actions/runs/16255155597/job/45890331952#step:4:736
2426
- logger.warning(
2427
- f"Disable hybrid SWA memory for {model_arch} as it is not yet supported."
2428
- )
2429
- self.disable_hybrid_swa_memory = True
3103
+ ), f"{arg_name} custom rule bucket values should be non-negative"
2430
3104
 
2431
3105
  def adjust_mem_fraction_for_vlm(self, model_config):
2432
3106
  vision_config = getattr(model_config.hf_config, "vision_config", None)
@@ -2478,6 +3152,26 @@ def prepare_server_args(argv: List[str]) -> ServerArgs:
2478
3152
  Returns:
2479
3153
  The server arguments.
2480
3154
  """
3155
+ # Import here to avoid circular imports
3156
+ from sglang.srt.server_args_config_parser import ConfigArgumentMerger
3157
+
3158
+ # Check for config file and merge arguments if present
3159
+ if "--config" in argv:
3160
+ # Extract boolean actions from the parser to handle them correctly
3161
+ parser = argparse.ArgumentParser()
3162
+ ServerArgs.add_cli_args(parser)
3163
+
3164
+ # Get boolean action destinations
3165
+ boolean_actions = []
3166
+ for action in parser._actions:
3167
+ if hasattr(action, "dest") and hasattr(action, "action"):
3168
+ if action.action in ["store_true", "store_false"]:
3169
+ boolean_actions.append(action.dest)
3170
+
3171
+ # Merge config file arguments with CLI arguments
3172
+ config_merger = ConfigArgumentMerger(boolean_actions=boolean_actions)
3173
+ argv = config_merger.merge_config_with_args(argv)
3174
+
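A minimal usage sketch of the new --config path (the YAML file name and its contents are assumptions; the key format is whatever ConfigArgumentMerger expects, with explicit CLI flags still applying on top):

# server_config.yaml is assumed to map CLI option names to values,
# e.g. model-path and tp-size entries.
from sglang.srt.server_args import prepare_server_args

server_args = prepare_server_args(
    ["--config", "server_config.yaml", "--port", "30000"]
)
print(server_args.tp_size)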
2481
3175
  parser = argparse.ArgumentParser()
2482
3176
  ServerArgs.add_cli_args(parser)
2483
3177
  raw_args = parser.parse_args(argv)
@@ -2612,14 +3306,19 @@ def auto_choose_speculative_params(self: ServerArgs):
2612
3306
  """
2613
3307
  hf_config = self.get_hf_config()
2614
3308
  arch = hf_config.architectures[0]
2615
-
3309
+ if self.speculative_algorithm == "STANDALONE":
3310
+ # The default value for standalone speculative decoding
3311
+ return (3, 1, 4)
2616
3312
  if arch in ["LlamaForCausalLM"]:
2617
3313
  # The default value for llama
2618
3314
  return (5, 4, 8)
2619
3315
  elif arch in [
3316
+ "DeepseekV32ForCausalLM",
2620
3317
  "DeepseekV3ForCausalLM",
2621
3318
  "DeepseekV2ForCausalLM",
2622
3319
  "GptOssForCausalLM",
3320
+ "BailingMoeForCausalLM",
3321
+ "BailingMoeV2ForCausalLM",
2623
3322
  ]:
2624
3323
  # The default value for deepseek and gpt-oss
2625
3324
  return (3, 1, 4)