sglang 0.5.3rc2__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (408)
  1. sglang/bench_one_batch.py +47 -28
  2. sglang/bench_one_batch_server.py +41 -25
  3. sglang/bench_serving.py +330 -156
  4. sglang/check_env.py +1 -1
  5. sglang/compile_deep_gemm.py +6 -2
  6. sglang/global_config.py +1 -25
  7. sglang/lang/api.py +6 -0
  8. sglang/lang/interpreter.py +1 -0
  9. sglang/lang/ir.py +13 -0
  10. sglang/launch_server.py +8 -15
  11. sglang/profiler.py +18 -1
  12. sglang/srt/_custom_ops.py +1 -1
  13. sglang/srt/batch_invariant_ops/batch_invariant_ops.py +4 -6
  14. sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
  15. sglang/srt/compilation/backend.py +437 -0
  16. sglang/srt/compilation/compilation_config.py +20 -0
  17. sglang/srt/compilation/compilation_counter.py +47 -0
  18. sglang/srt/compilation/compile.py +210 -0
  19. sglang/srt/compilation/compiler_interface.py +503 -0
  20. sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
  21. sglang/srt/compilation/fix_functionalization.py +134 -0
  22. sglang/srt/compilation/fx_utils.py +83 -0
  23. sglang/srt/compilation/inductor_pass.py +140 -0
  24. sglang/srt/compilation/pass_manager.py +66 -0
  25. sglang/srt/compilation/piecewise_context_manager.py +40 -0
  26. sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
  27. sglang/srt/configs/__init__.py +4 -0
  28. sglang/srt/configs/deepseek_ocr.py +262 -0
  29. sglang/srt/configs/deepseekvl2.py +194 -96
  30. sglang/srt/configs/dots_vlm.py +2 -7
  31. sglang/srt/configs/falcon_h1.py +13 -64
  32. sglang/srt/configs/load_config.py +25 -2
  33. sglang/srt/configs/mamba_utils.py +117 -0
  34. sglang/srt/configs/model_config.py +134 -23
  35. sglang/srt/configs/modelopt_config.py +30 -0
  36. sglang/srt/configs/nemotron_h.py +286 -0
  37. sglang/srt/configs/olmo3.py +105 -0
  38. sglang/srt/configs/points_v15_chat.py +29 -0
  39. sglang/srt/configs/qwen3_next.py +11 -47
  40. sglang/srt/configs/qwen3_omni.py +613 -0
  41. sglang/srt/configs/qwen3_vl.py +0 -10
  42. sglang/srt/connector/remote_instance.py +1 -1
  43. sglang/srt/constrained/base_grammar_backend.py +5 -1
  44. sglang/srt/constrained/llguidance_backend.py +5 -0
  45. sglang/srt/constrained/outlines_backend.py +1 -1
  46. sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
  47. sglang/srt/constrained/utils.py +12 -0
  48. sglang/srt/constrained/xgrammar_backend.py +20 -11
  49. sglang/srt/disaggregation/ascend/transfer_engine.py +1 -1
  50. sglang/srt/disaggregation/base/conn.py +17 -4
  51. sglang/srt/disaggregation/common/conn.py +4 -2
  52. sglang/srt/disaggregation/decode.py +123 -31
  53. sglang/srt/disaggregation/decode_kvcache_offload_manager.py +1 -1
  54. sglang/srt/disaggregation/fake/conn.py +11 -3
  55. sglang/srt/disaggregation/mooncake/conn.py +157 -19
  56. sglang/srt/disaggregation/nixl/conn.py +69 -24
  57. sglang/srt/disaggregation/prefill.py +96 -270
  58. sglang/srt/distributed/device_communicators/all_reduce_utils.py +4 -4
  59. sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
  60. sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
  61. sglang/srt/distributed/device_communicators/pynccl.py +24 -12
  62. sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
  63. sglang/srt/distributed/device_communicators/symm_mem.py +1 -1
  64. sglang/srt/distributed/naive_distributed.py +5 -4
  65. sglang/srt/distributed/parallel_state.py +70 -19
  66. sglang/srt/elastic_ep/elastic_ep.py +74 -0
  67. sglang/srt/entrypoints/context.py +3 -2
  68. sglang/srt/entrypoints/engine.py +66 -66
  69. sglang/srt/entrypoints/grpc_server.py +431 -234
  70. sglang/srt/entrypoints/harmony_utils.py +2 -2
  71. sglang/srt/entrypoints/http_server.py +120 -8
  72. sglang/srt/entrypoints/http_server_engine.py +1 -7
  73. sglang/srt/entrypoints/openai/protocol.py +225 -37
  74. sglang/srt/entrypoints/openai/serving_base.py +49 -2
  75. sglang/srt/entrypoints/openai/serving_chat.py +29 -74
  76. sglang/srt/entrypoints/openai/serving_classify.py +204 -0
  77. sglang/srt/entrypoints/openai/serving_completions.py +15 -1
  78. sglang/srt/entrypoints/openai/serving_responses.py +5 -2
  79. sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
  80. sglang/srt/environ.py +42 -4
  81. sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
  82. sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
  83. sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
  84. sglang/srt/eplb/expert_distribution.py +3 -4
  85. sglang/srt/eplb/expert_location_dispatch.py +2 -2
  86. sglang/srt/eplb/expert_location_updater.py +2 -2
  87. sglang/srt/function_call/base_format_detector.py +17 -18
  88. sglang/srt/function_call/function_call_parser.py +18 -14
  89. sglang/srt/function_call/glm4_moe_detector.py +1 -5
  90. sglang/srt/function_call/gpt_oss_detector.py +1 -1
  91. sglang/srt/function_call/json_array_parser.py +0 -2
  92. sglang/srt/function_call/utils.py +2 -2
  93. sglang/srt/grpc/compile_proto.py +3 -3
  94. sglang/srt/{entrypoints → grpc}/grpc_request_manager.py +112 -52
  95. sglang/srt/grpc/health_servicer.py +189 -0
  96. sglang/srt/grpc/scheduler_launcher.py +181 -0
  97. sglang/srt/grpc/sglang_scheduler_pb2.py +78 -70
  98. sglang/srt/grpc/sglang_scheduler_pb2.pyi +66 -10
  99. sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +89 -1
  100. sglang/srt/layers/activation.py +4 -1
  101. sglang/srt/layers/attention/aiter_backend.py +3 -3
  102. sglang/srt/layers/attention/ascend_backend.py +17 -1
  103. sglang/srt/layers/attention/attention_registry.py +43 -23
  104. sglang/srt/layers/attention/base_attn_backend.py +20 -1
  105. sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
  106. sglang/srt/layers/attention/fla/chunk.py +0 -1
  107. sglang/srt/layers/attention/fla/chunk_o.py +1 -1
  108. sglang/srt/layers/attention/fla/index.py +0 -2
  109. sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
  110. sglang/srt/layers/attention/fla/utils.py +0 -3
  111. sglang/srt/layers/attention/fla/wy_fast.py +0 -2
  112. sglang/srt/layers/attention/flashattention_backend.py +12 -8
  113. sglang/srt/layers/attention/flashinfer_backend.py +248 -21
  114. sglang/srt/layers/attention/flashinfer_mla_backend.py +20 -18
  115. sglang/srt/layers/attention/flashmla_backend.py +2 -2
  116. sglang/srt/layers/attention/hybrid_attn_backend.py +1 -1
  117. sglang/srt/layers/attention/hybrid_linear_attn_backend.py +165 -62
  118. sglang/srt/layers/attention/intel_amx_backend.py +1 -1
  119. sglang/srt/layers/attention/mamba/causal_conv1d.py +1 -1
  120. sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +9 -5
  121. sglang/srt/layers/attention/mamba/mamba.py +189 -241
  122. sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
  123. sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
  124. sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +0 -50
  125. sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +0 -60
  126. sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +0 -111
  127. sglang/srt/layers/attention/mamba/ops/ssd_combined.py +0 -1
  128. sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +0 -11
  129. sglang/srt/layers/attention/npu_ops/mla_preprocess.py +1 -1
  130. sglang/srt/layers/attention/nsa/nsa_indexer.py +40 -83
  131. sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
  132. sglang/srt/layers/attention/nsa/utils.py +0 -1
  133. sglang/srt/layers/attention/nsa_backend.py +404 -90
  134. sglang/srt/layers/attention/triton_backend.py +208 -34
  135. sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
  136. sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
  137. sglang/srt/layers/attention/trtllm_mha_backend.py +2 -2
  138. sglang/srt/layers/attention/trtllm_mla_backend.py +361 -30
  139. sglang/srt/layers/attention/utils.py +11 -7
  140. sglang/srt/layers/attention/vision.py +3 -3
  141. sglang/srt/layers/attention/xpu_backend.py +1028 -0
  142. sglang/srt/layers/communicator.py +11 -7
  143. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +4 -8
  144. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/configurer.py +4 -3
  145. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
  146. sglang/srt/layers/dp_attention.py +17 -0
  147. sglang/srt/layers/layernorm.py +45 -15
  148. sglang/srt/layers/linear.py +9 -1
  149. sglang/srt/layers/logits_processor.py +147 -17
  150. sglang/srt/layers/modelopt_utils.py +11 -0
  151. sglang/srt/layers/moe/cutlass_moe.py +0 -2
  152. sglang/srt/layers/moe/cutlass_w4a8_moe.py +213 -21
  153. sglang/srt/layers/moe/ep_moe/kernels.py +35 -457
  154. sglang/srt/layers/moe/ep_moe/layer.py +119 -397
  155. sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +1 -1
  156. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  157. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
  158. sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +11 -3
  159. sglang/srt/layers/moe/fused_moe_triton/layer.py +76 -70
  160. sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +18 -42
  161. sglang/srt/layers/moe/moe_runner/deep_gemm.py +304 -0
  162. sglang/srt/layers/moe/moe_runner/runner.py +3 -0
  163. sglang/srt/layers/moe/moe_runner/triton.py +3 -1
  164. sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
  165. sglang/srt/layers/moe/router.py +51 -15
  166. sglang/srt/layers/moe/token_dispatcher/__init__.py +10 -0
  167. sglang/srt/layers/moe/token_dispatcher/base.py +1 -1
  168. sglang/srt/layers/moe/token_dispatcher/deepep.py +110 -97
  169. sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
  170. sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
  171. sglang/srt/layers/moe/topk.py +3 -2
  172. sglang/srt/layers/moe/utils.py +17 -1
  173. sglang/srt/layers/quantization/__init__.py +2 -53
  174. sglang/srt/layers/quantization/awq.py +183 -6
  175. sglang/srt/layers/quantization/awq_triton.py +29 -0
  176. sglang/srt/layers/quantization/base_config.py +20 -1
  177. sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
  178. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +20 -49
  179. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
  180. sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +3 -0
  181. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
  182. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
  183. sglang/srt/layers/quantization/fp8.py +84 -18
  184. sglang/srt/layers/quantization/fp8_kernel.py +55 -10
  185. sglang/srt/layers/quantization/fp8_utils.py +42 -14
  186. sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
  187. sglang/srt/layers/quantization/gptq.py +0 -1
  188. sglang/srt/layers/quantization/int8_kernel.py +18 -2
  189. sglang/srt/layers/quantization/marlin_utils.py +12 -0
  190. sglang/srt/layers/quantization/modelopt_quant.py +125 -100
  191. sglang/srt/layers/quantization/mxfp4.py +5 -30
  192. sglang/srt/layers/quantization/petit.py +1 -1
  193. sglang/srt/layers/quantization/quark/quark.py +3 -1
  194. sglang/srt/layers/quantization/quark/quark_moe.py +3 -3
  195. sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
  196. sglang/srt/layers/quantization/unquant.py +1 -4
  197. sglang/srt/layers/quantization/utils.py +0 -1
  198. sglang/srt/layers/quantization/w4afp8.py +51 -20
  199. sglang/srt/layers/quantization/w8a8_int8.py +30 -24
  200. sglang/srt/layers/radix_attention.py +59 -9
  201. sglang/srt/layers/rotary_embedding.py +673 -16
  202. sglang/srt/layers/sampler.py +36 -16
  203. sglang/srt/layers/sparse_pooler.py +98 -0
  204. sglang/srt/layers/utils.py +0 -1
  205. sglang/srt/layers/vocab_parallel_embedding.py +4 -1
  206. sglang/srt/lora/backend/triton_backend.py +0 -1
  207. sglang/srt/lora/eviction_policy.py +139 -0
  208. sglang/srt/lora/lora_manager.py +24 -9
  209. sglang/srt/lora/lora_registry.py +1 -1
  210. sglang/srt/lora/mem_pool.py +40 -16
  211. sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +1 -1
  212. sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +4 -2
  213. sglang/srt/managers/cache_controller.py +48 -17
  214. sglang/srt/managers/data_parallel_controller.py +146 -42
  215. sglang/srt/managers/detokenizer_manager.py +40 -13
  216. sglang/srt/managers/io_struct.py +66 -16
  217. sglang/srt/managers/mm_utils.py +20 -18
  218. sglang/srt/managers/multi_tokenizer_mixin.py +66 -81
  219. sglang/srt/managers/overlap_utils.py +96 -19
  220. sglang/srt/managers/schedule_batch.py +241 -511
  221. sglang/srt/managers/schedule_policy.py +15 -2
  222. sglang/srt/managers/scheduler.py +399 -499
  223. sglang/srt/managers/scheduler_metrics_mixin.py +55 -8
  224. sglang/srt/managers/scheduler_output_processor_mixin.py +317 -111
  225. sglang/srt/managers/scheduler_pp_mixin.py +341 -0
  226. sglang/srt/managers/scheduler_profiler_mixin.py +57 -10
  227. sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
  228. sglang/srt/managers/scheduler_update_weights_mixin.py +33 -14
  229. sglang/srt/managers/tokenizer_communicator_mixin.py +71 -55
  230. sglang/srt/managers/tokenizer_manager.py +378 -90
  231. sglang/srt/managers/tp_worker.py +212 -161
  232. sglang/srt/managers/utils.py +78 -2
  233. sglang/srt/mem_cache/allocator.py +7 -2
  234. sglang/srt/mem_cache/allocator_ascend.py +2 -2
  235. sglang/srt/mem_cache/base_prefix_cache.py +2 -2
  236. sglang/srt/mem_cache/chunk_cache.py +13 -2
  237. sglang/srt/mem_cache/common.py +480 -0
  238. sglang/srt/mem_cache/evict_policy.py +16 -1
  239. sglang/srt/mem_cache/hicache_storage.py +4 -1
  240. sglang/srt/mem_cache/hiradix_cache.py +16 -3
  241. sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
  242. sglang/srt/mem_cache/memory_pool.py +435 -219
  243. sglang/srt/mem_cache/memory_pool_host.py +0 -1
  244. sglang/srt/mem_cache/multimodal_cache.py +0 -1
  245. sglang/srt/mem_cache/radix_cache.py +53 -19
  246. sglang/srt/mem_cache/radix_cache_cpp.py +19 -14
  247. sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +8 -2
  248. sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +1 -13
  249. sglang/srt/mem_cache/storage/backend_factory.py +2 -2
  250. sglang/srt/mem_cache/storage/eic/eic_storage.py +5 -6
  251. sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
  252. sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +9 -3
  253. sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +5 -3
  254. sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +101 -17
  255. sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
  256. sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
  257. sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
  258. sglang/srt/mem_cache/swa_radix_cache.py +92 -26
  259. sglang/srt/metrics/collector.py +31 -0
  260. sglang/srt/metrics/func_timer.py +1 -1
  261. sglang/srt/model_executor/cuda_graph_runner.py +43 -5
  262. sglang/srt/model_executor/forward_batch_info.py +28 -23
  263. sglang/srt/model_executor/model_runner.py +379 -139
  264. sglang/srt/model_executor/npu_graph_runner.py +2 -3
  265. sglang/srt/model_executor/piecewise_cuda_graph_runner.py +539 -0
  266. sglang/srt/model_loader/__init__.py +1 -1
  267. sglang/srt/model_loader/loader.py +424 -27
  268. sglang/srt/model_loader/utils.py +0 -1
  269. sglang/srt/model_loader/weight_utils.py +47 -28
  270. sglang/srt/models/apertus.py +2 -3
  271. sglang/srt/models/arcee.py +2 -2
  272. sglang/srt/models/bailing_moe.py +13 -52
  273. sglang/srt/models/bailing_moe_nextn.py +3 -4
  274. sglang/srt/models/bert.py +1 -1
  275. sglang/srt/models/deepseek_nextn.py +19 -3
  276. sglang/srt/models/deepseek_ocr.py +1516 -0
  277. sglang/srt/models/deepseek_v2.py +273 -98
  278. sglang/srt/models/dots_ocr.py +0 -2
  279. sglang/srt/models/dots_vlm.py +0 -1
  280. sglang/srt/models/dots_vlm_vit.py +1 -1
  281. sglang/srt/models/falcon_h1.py +13 -19
  282. sglang/srt/models/gemma3_mm.py +16 -0
  283. sglang/srt/models/gemma3n_mm.py +1 -2
  284. sglang/srt/models/glm4_moe.py +14 -37
  285. sglang/srt/models/glm4_moe_nextn.py +2 -2
  286. sglang/srt/models/glm4v.py +2 -1
  287. sglang/srt/models/glm4v_moe.py +5 -5
  288. sglang/srt/models/gpt_oss.py +5 -5
  289. sglang/srt/models/grok.py +10 -23
  290. sglang/srt/models/hunyuan.py +2 -7
  291. sglang/srt/models/interns1.py +0 -1
  292. sglang/srt/models/kimi_vl.py +1 -7
  293. sglang/srt/models/kimi_vl_moonvit.py +3 -1
  294. sglang/srt/models/llama.py +2 -2
  295. sglang/srt/models/llama_eagle3.py +1 -1
  296. sglang/srt/models/longcat_flash.py +5 -22
  297. sglang/srt/models/longcat_flash_nextn.py +3 -14
  298. sglang/srt/models/mimo.py +2 -13
  299. sglang/srt/models/mimo_mtp.py +1 -2
  300. sglang/srt/models/minicpmo.py +7 -5
  301. sglang/srt/models/mixtral.py +1 -4
  302. sglang/srt/models/mllama.py +1 -1
  303. sglang/srt/models/mllama4.py +13 -3
  304. sglang/srt/models/nemotron_h.py +511 -0
  305. sglang/srt/models/olmo2.py +31 -4
  306. sglang/srt/models/opt.py +5 -5
  307. sglang/srt/models/phi.py +1 -1
  308. sglang/srt/models/phi4mm.py +1 -1
  309. sglang/srt/models/phimoe.py +0 -1
  310. sglang/srt/models/pixtral.py +0 -3
  311. sglang/srt/models/points_v15_chat.py +186 -0
  312. sglang/srt/models/qwen.py +0 -1
  313. sglang/srt/models/qwen2_5_vl.py +3 -3
  314. sglang/srt/models/qwen2_audio.py +2 -15
  315. sglang/srt/models/qwen2_moe.py +15 -12
  316. sglang/srt/models/qwen2_vl.py +5 -2
  317. sglang/srt/models/qwen3_moe.py +19 -35
  318. sglang/srt/models/qwen3_next.py +7 -12
  319. sglang/srt/models/qwen3_next_mtp.py +3 -4
  320. sglang/srt/models/qwen3_omni_moe.py +661 -0
  321. sglang/srt/models/qwen3_vl.py +37 -33
  322. sglang/srt/models/qwen3_vl_moe.py +57 -185
  323. sglang/srt/models/roberta.py +55 -3
  324. sglang/srt/models/sarashina2_vision.py +0 -1
  325. sglang/srt/models/step3_vl.py +3 -5
  326. sglang/srt/models/utils.py +11 -1
  327. sglang/srt/multimodal/processors/base_processor.py +6 -2
  328. sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
  329. sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
  330. sglang/srt/multimodal/processors/dots_vlm.py +0 -1
  331. sglang/srt/multimodal/processors/glm4v.py +1 -5
  332. sglang/srt/multimodal/processors/internvl.py +0 -2
  333. sglang/srt/multimodal/processors/janus_pro.py +0 -1
  334. sglang/srt/multimodal/processors/mllama4.py +0 -8
  335. sglang/srt/multimodal/processors/phi4mm.py +0 -1
  336. sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
  337. sglang/srt/multimodal/processors/qwen_vl.py +75 -16
  338. sglang/srt/multimodal/processors/step3_vl.py +1 -1
  339. sglang/srt/parser/conversation.py +41 -0
  340. sglang/srt/parser/reasoning_parser.py +0 -1
  341. sglang/srt/sampling/custom_logit_processor.py +77 -2
  342. sglang/srt/sampling/sampling_batch_info.py +17 -22
  343. sglang/srt/sampling/sampling_params.py +70 -2
  344. sglang/srt/server_args.py +577 -73
  345. sglang/srt/server_args_config_parser.py +1 -1
  346. sglang/srt/single_batch_overlap.py +38 -28
  347. sglang/srt/speculative/base_spec_worker.py +34 -0
  348. sglang/srt/speculative/draft_utils.py +226 -0
  349. sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +24 -7
  350. sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +23 -2
  351. sglang/srt/speculative/eagle_info.py +57 -18
  352. sglang/srt/speculative/eagle_info_v2.py +458 -0
  353. sglang/srt/speculative/eagle_utils.py +138 -0
  354. sglang/srt/speculative/eagle_worker.py +83 -280
  355. sglang/srt/speculative/eagle_worker_v2.py +702 -0
  356. sglang/srt/speculative/{ngram_utils.py → ngram_info.py} +14 -9
  357. sglang/srt/speculative/ngram_worker.py +12 -11
  358. sglang/srt/speculative/spec_info.py +2 -0
  359. sglang/srt/speculative/spec_utils.py +38 -3
  360. sglang/srt/speculative/standalone_worker.py +4 -14
  361. sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
  362. sglang/srt/two_batch_overlap.py +28 -14
  363. sglang/srt/utils/__init__.py +1 -1
  364. sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
  365. sglang/srt/utils/common.py +192 -47
  366. sglang/srt/utils/hf_transformers_utils.py +40 -17
  367. sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
  368. sglang/srt/{offloader.py → utils/offloader.py} +4 -4
  369. sglang/srt/utils/profile_merger.py +199 -0
  370. sglang/test/attention/test_flashattn_backend.py +1 -1
  371. sglang/test/attention/test_flashattn_mla_backend.py +0 -1
  372. sglang/test/attention/test_prefix_chunk_info.py +0 -2
  373. sglang/test/attention/test_trtllm_mla_backend.py +221 -53
  374. sglang/test/few_shot_gsm8k_engine.py +2 -4
  375. sglang/test/kit_matched_stop.py +157 -0
  376. sglang/test/longbench_v2/__init__.py +1 -0
  377. sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
  378. sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
  379. sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
  380. sglang/test/run_eval.py +41 -0
  381. sglang/test/runners.py +2 -0
  382. sglang/test/send_one.py +42 -7
  383. sglang/test/simple_eval_common.py +3 -0
  384. sglang/test/simple_eval_gpqa.py +0 -1
  385. sglang/test/simple_eval_humaneval.py +0 -3
  386. sglang/test/simple_eval_longbench_v2.py +344 -0
  387. sglang/test/test_block_fp8.py +1 -2
  388. sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
  389. sglang/test/test_cutlass_moe.py +1 -2
  390. sglang/test/test_cutlass_w4a8_moe.py +10 -20
  391. sglang/test/test_deterministic.py +232 -99
  392. sglang/test/test_deterministic_utils.py +73 -0
  393. sglang/test/test_disaggregation_utils.py +81 -0
  394. sglang/test/test_marlin_moe.py +0 -1
  395. sglang/test/test_utils.py +85 -20
  396. sglang/version.py +1 -1
  397. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/METADATA +45 -33
  398. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/RECORD +404 -345
  399. sglang/srt/layers/attention/mamba/mamba_utils.py +0 -81
  400. sglang/srt/managers/tp_worker_overlap_thread.py +0 -311
  401. sglang/srt/speculative/build_eagle_tree.py +0 -427
  402. sglang/test/test_block_fp8_ep.py +0 -358
  403. /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
  404. /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
  405. /sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +0 -0
  406. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/WHEEL +0 -0
  407. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/licenses/LICENSE +0 -0
  408. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/top_level.txt +0 -0
@@ -29,7 +29,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__
  from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
 
 
- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16sglang_scheduler.proto\x12\x15sglang.grpc.scheduler\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xe1\x05\n\x0eSamplingParams\x12\x13\n\x0btemperature\x18\x01 \x01(\x02\x12\r\n\x05top_p\x18\x02 \x01(\x02\x12\r\n\x05top_k\x18\x03 \x01(\x05\x12\r\n\x05min_p\x18\x04 \x01(\x02\x12\x19\n\x11\x66requency_penalty\x18\x05 \x01(\x02\x12\x18\n\x10presence_penalty\x18\x06 \x01(\x02\x12\x1a\n\x12repetition_penalty\x18\x07 \x01(\x02\x12\x1b\n\x0emax_new_tokens\x18\x08 \x01(\x05H\x01\x88\x01\x01\x12\x0c\n\x04stop\x18\t \x03(\t\x12\x16\n\x0estop_token_ids\x18\n \x03(\r\x12\x1b\n\x13skip_special_tokens\x18\x0b \x01(\x08\x12%\n\x1dspaces_between_special_tokens\x18\x0c \x01(\x08\x12\x0f\n\x05regex\x18\r \x01(\tH\x00\x12\x15\n\x0bjson_schema\x18\x0e \x01(\tH\x00\x12\x16\n\x0c\x65\x62nf_grammar\x18\x0f \x01(\tH\x00\x12\x18\n\x0estructural_tag\x18\x10 \x01(\tH\x00\x12\x11\n\tlora_path\x18\x11 \x01(\t\x12\t\n\x01n\x18\x12 \x01(\x05\x12\x15\n\rtoken_healing\x18\x13 \x01(\x08\x12\x16\n\x0emin_new_tokens\x18\x14 \x01(\x05\x12\x12\n\nignore_eos\x18\x15 \x01(\x08\x12\x14\n\x0cno_stop_trim\x18\x16 \x01(\x08\x12\x17\n\x0fstream_interval\x18\x17 \x01(\x05\x12H\n\nlogit_bias\x18\x18 \x03(\x0b\x32\x34.sglang.grpc.scheduler.SamplingParams.LogitBiasEntry\x12.\n\rcustom_params\x18\x19 \x01(\x0b\x32\x17.google.protobuf.Struct\x1a\x30\n\x0eLogitBiasEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x42\x0c\n\nconstraintB\x11\n\x0f_max_new_tokens\"]\n\x13\x44isaggregatedParams\x12\x16\n\x0e\x62ootstrap_host\x18\x01 \x01(\t\x12\x16\n\x0e\x62ootstrap_port\x18\x02 \x01(\x05\x12\x16\n\x0e\x62ootstrap_room\x18\x03 \x01(\x05\"\xe2\x04\n\x0fGenerateRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x38\n\ttokenized\x18\x02 \x01(\x0b\x32%.sglang.grpc.scheduler.TokenizedInput\x12:\n\tmm_inputs\x18\x03 \x01(\x0b\x32\'.sglang.grpc.scheduler.MultimodalInputs\x12>\n\x0fsampling_params\x18\x04 \x01(\x0b\x32%.sglang.grpc.scheduler.SamplingParams\x12\x16\n\x0ereturn_logprob\x18\x05 \x01(\x08\x12\x19\n\x11logprob_start_len\x18\x06 \x01(\x05\x12\x18\n\x10top_logprobs_num\x18\x07 \x01(\x05\x12\x19\n\x11token_ids_logprob\x18\x08 \x03(\r\x12\x1c\n\x14return_hidden_states\x18\t \x01(\x08\x12H\n\x14\x64isaggregated_params\x18\n \x01(\x0b\x32*.sglang.grpc.scheduler.DisaggregatedParams\x12\x1e\n\x16\x63ustom_logit_processor\x18\x0b \x01(\t\x12-\n\ttimestamp\x18\x0c \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x13\n\x0blog_metrics\x18\r \x01(\x08\x12\x14\n\x0cinput_embeds\x18\x0e \x03(\x02\x12\x0f\n\x07lora_id\x18\x0f \x01(\t\x12\x1a\n\x12\x64\x61ta_parallel_rank\x18\x10 \x01(\x05\x12\x0e\n\x06stream\x18\x11 \x01(\x08\":\n\x0eTokenizedInput\x12\x15\n\roriginal_text\x18\x01 \x01(\t\x12\x11\n\tinput_ids\x18\x02 \x03(\r\"\xd3\x01\n\x10MultimodalInputs\x12\x12\n\nimage_urls\x18\x01 \x03(\t\x12\x12\n\nvideo_urls\x18\x02 \x03(\t\x12\x12\n\naudio_urls\x18\x03 \x03(\t\x12\x33\n\x12processed_features\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\nimage_data\x18\x05 \x03(\x0c\x12\x12\n\nvideo_data\x18\x06 \x03(\x0c\x12\x12\n\naudio_data\x18\x07 \x03(\x0c\x12\x12\n\nmodalities\x18\x08 \x03(\t\"\xe3\x01\n\x10GenerateResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12;\n\x05\x63hunk\x18\x02 \x01(\x0b\x32*.sglang.grpc.scheduler.GenerateStreamChunkH\x00\x12;\n\x08\x63omplete\x18\x03 \x01(\x0b\x32\'.sglang.grpc.scheduler.GenerateCompleteH\x00\x12\x35\n\x05\x65rror\x18\x04 
\x01(\x0b\x32$.sglang.grpc.scheduler.GenerateErrorH\x00\x42\n\n\x08response\"\x95\x02\n\x13GenerateStreamChunk\x12\x11\n\ttoken_ids\x18\x01 \x03(\r\x12\x15\n\rprompt_tokens\x18\x02 \x01(\x05\x12\x19\n\x11\x63ompletion_tokens\x18\x03 \x01(\x05\x12\x15\n\rcached_tokens\x18\x04 \x01(\x05\x12>\n\x0foutput_logprobs\x18\x05 \x01(\x0b\x32%.sglang.grpc.scheduler.OutputLogProbs\x12\x15\n\rhidden_states\x18\x06 \x03(\x02\x12<\n\x0einput_logprobs\x18\x07 \x01(\x0b\x32$.sglang.grpc.scheduler.InputLogProbs\x12\r\n\x05index\x18\x08 \x01(\r\"\x9b\x03\n\x10GenerateComplete\x12\x12\n\noutput_ids\x18\x01 \x03(\r\x12\x15\n\rfinish_reason\x18\x02 \x01(\t\x12\x15\n\rprompt_tokens\x18\x03 \x01(\x05\x12\x19\n\x11\x63ompletion_tokens\x18\x04 \x01(\x05\x12\x15\n\rcached_tokens\x18\x05 \x01(\x05\x12>\n\x0foutput_logprobs\x18\x06 \x01(\x0b\x32%.sglang.grpc.scheduler.OutputLogProbs\x12>\n\x11\x61ll_hidden_states\x18\x07 \x03(\x0b\x32#.sglang.grpc.scheduler.HiddenStates\x12\x1a\n\x10matched_token_id\x18\x08 \x01(\rH\x00\x12\x1a\n\x10matched_stop_str\x18\t \x01(\tH\x00\x12<\n\x0einput_logprobs\x18\n \x01(\x0b\x32$.sglang.grpc.scheduler.InputLogProbs\x12\r\n\x05index\x18\x0b \x01(\rB\x0e\n\x0cmatched_stop\"K\n\rGenerateError\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x18\n\x10http_status_code\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"u\n\x0eOutputLogProbs\x12\x16\n\x0etoken_logprobs\x18\x01 \x03(\x02\x12\x11\n\ttoken_ids\x18\x02 \x03(\x05\x12\x38\n\x0ctop_logprobs\x18\x03 \x03(\x0b\x32\".sglang.grpc.scheduler.TopLogProbs\"\x9e\x01\n\rInputLogProbs\x12@\n\x0etoken_logprobs\x18\x01 \x03(\x0b\x32(.sglang.grpc.scheduler.InputTokenLogProb\x12\x11\n\ttoken_ids\x18\x02 \x03(\x05\x12\x38\n\x0ctop_logprobs\x18\x03 \x03(\x0b\x32\".sglang.grpc.scheduler.TopLogProbs\"1\n\x11InputTokenLogProb\x12\x12\n\x05value\x18\x01 \x01(\x02H\x00\x88\x01\x01\x42\x08\n\x06_value\"0\n\x0bTopLogProbs\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\x11\n\ttoken_ids\x18\x02 \x03(\x05\"?\n\x0cHiddenStates\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\r\n\x05layer\x18\x02 \x01(\x05\x12\x10\n\x08position\x18\x03 \x01(\x05\"\xca\x02\n\x0c\x45mbedRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x38\n\ttokenized\x18\x02 \x01(\x0b\x32%.sglang.grpc.scheduler.TokenizedInput\x12:\n\tmm_inputs\x18\x04 \x01(\x0b\x32\'.sglang.grpc.scheduler.MultimodalInputs\x12>\n\x0fsampling_params\x18\x05 \x01(\x0b\x32%.sglang.grpc.scheduler.SamplingParams\x12\x13\n\x0blog_metrics\x18\x06 \x01(\x08\x12\x16\n\x0etoken_type_ids\x18\x07 \x03(\x05\x12\x1a\n\x12\x64\x61ta_parallel_rank\x18\x08 \x01(\x05\x12\x18\n\x10is_cross_encoder\x18\t \x01(\x08\x12\r\n\x05texts\x18\n \x03(\t\"\x9d\x01\n\rEmbedResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x38\n\x08\x63omplete\x18\x02 \x01(\x0b\x32$.sglang.grpc.scheduler.EmbedCompleteH\x00\x12\x32\n\x05\x65rror\x18\x03 \x01(\x0b\x32!.sglang.grpc.scheduler.EmbedErrorH\x00\x42\n\n\x08response\"\xa3\x01\n\rEmbedComplete\x12\x11\n\tembedding\x18\x01 \x03(\x02\x12\x15\n\rprompt_tokens\x18\x02 \x01(\x05\x12\x15\n\rcached_tokens\x18\x03 \x01(\x05\x12\x15\n\rembedding_dim\x18\x04 \x01(\x05\x12:\n\x10\x62\x61tch_embeddings\x18\x05 \x03(\x0b\x32 .sglang.grpc.scheduler.Embedding\"*\n\tEmbedding\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\r\n\x05index\x18\x02 \x01(\x05\"<\n\nEmbedError\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"N\n\x12HealthCheckRequest\x12\x38\n\ttokenized\x18\x01 
\x01(\x0b\x32%.sglang.grpc.scheduler.TokenizedInput\"7\n\x13HealthCheckResponse\x12\x0f\n\x07healthy\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\x0c\x41\x62ortRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\"1\n\rAbortResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"I\n\x0fLoadLoRARequest\x12\x12\n\nadapter_id\x18\x01 \x01(\t\x12\x14\n\x0c\x61\x64\x61pter_path\x18\x02 \x01(\t\x12\x0c\n\x04rank\x18\x03 \x01(\x05\"H\n\x10LoadLoRAResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x12\n\nadapter_id\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\"\'\n\x11UnloadLoRARequest\x12\x12\n\nadapter_id\x18\x01 \x01(\t\"6\n\x12UnloadLoRAResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"w\n\x14UpdateWeightsRequest\x12\x13\n\tdisk_path\x18\x01 \x01(\tH\x00\x12\x15\n\x0btensor_data\x18\x02 \x01(\x0cH\x00\x12\x14\n\nremote_url\x18\x03 \x01(\tH\x00\x12\x13\n\x0bweight_name\x18\x04 \x01(\tB\x08\n\x06source\"9\n\x15UpdateWeightsResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"-\n\x17GetInternalStateRequest\x12\x12\n\nstate_keys\x18\x01 \x03(\t\"B\n\x18GetInternalStateResponse\x12&\n\x05state\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"A\n\x17SetInternalStateRequest\x12&\n\x05state\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"<\n\x18SetInternalStateResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t2\xfe\x02\n\x0fSglangScheduler\x12]\n\x08Generate\x12&.sglang.grpc.scheduler.GenerateRequest\x1a\'.sglang.grpc.scheduler.GenerateResponse0\x01\x12R\n\x05\x45mbed\x12#.sglang.grpc.scheduler.EmbedRequest\x1a$.sglang.grpc.scheduler.EmbedResponse\x12\x64\n\x0bHealthCheck\x12).sglang.grpc.scheduler.HealthCheckRequest\x1a*.sglang.grpc.scheduler.HealthCheckResponse\x12R\n\x05\x41\x62ort\x12#.sglang.grpc.scheduler.AbortRequest\x1a$.sglang.grpc.scheduler.AbortResponseb\x06proto3')
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16sglang_scheduler.proto\x12\x15sglang.grpc.scheduler\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xd0\x05\n\x0eSamplingParams\x12\x13\n\x0btemperature\x18\x01 \x01(\x02\x12\r\n\x05top_p\x18\x02 \x01(\x02\x12\r\n\x05top_k\x18\x03 \x01(\x05\x12\r\n\x05min_p\x18\x04 \x01(\x02\x12\x19\n\x11\x66requency_penalty\x18\x05 \x01(\x02\x12\x18\n\x10presence_penalty\x18\x06 \x01(\x02\x12\x1a\n\x12repetition_penalty\x18\x07 \x01(\x02\x12\x1b\n\x0emax_new_tokens\x18\x08 \x01(\x05H\x01\x88\x01\x01\x12\x0c\n\x04stop\x18\t \x03(\t\x12\x16\n\x0estop_token_ids\x18\n \x03(\r\x12\x1b\n\x13skip_special_tokens\x18\x0b \x01(\x08\x12%\n\x1dspaces_between_special_tokens\x18\x0c \x01(\x08\x12\x0f\n\x05regex\x18\r \x01(\tH\x00\x12\x15\n\x0bjson_schema\x18\x0e \x01(\tH\x00\x12\x16\n\x0c\x65\x62nf_grammar\x18\x0f \x01(\tH\x00\x12\x18\n\x0estructural_tag\x18\x10 \x01(\tH\x00\x12\t\n\x01n\x18\x11 \x01(\x05\x12\x16\n\x0emin_new_tokens\x18\x12 \x01(\x05\x12\x12\n\nignore_eos\x18\x13 \x01(\x08\x12\x14\n\x0cno_stop_trim\x18\x14 \x01(\x08\x12\x1c\n\x0fstream_interval\x18\x15 \x01(\x05H\x02\x88\x01\x01\x12H\n\nlogit_bias\x18\x16 \x03(\x0b\x32\x34.sglang.grpc.scheduler.SamplingParams.LogitBiasEntry\x12.\n\rcustom_params\x18\x17 \x01(\x0b\x32\x17.google.protobuf.Struct\x1a\x30\n\x0eLogitBiasEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x42\x0c\n\nconstraintB\x11\n\x0f_max_new_tokensB\x12\n\x10_stream_interval\"]\n\x13\x44isaggregatedParams\x12\x16\n\x0e\x62ootstrap_host\x18\x01 \x01(\t\x12\x16\n\x0e\x62ootstrap_port\x18\x02 \x01(\x05\x12\x16\n\x0e\x62ootstrap_room\x18\x03 \x01(\x05\"\xe2\x04\n\x0fGenerateRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x38\n\ttokenized\x18\x02 \x01(\x0b\x32%.sglang.grpc.scheduler.TokenizedInput\x12:\n\tmm_inputs\x18\x03 \x01(\x0b\x32\'.sglang.grpc.scheduler.MultimodalInputs\x12>\n\x0fsampling_params\x18\x04 \x01(\x0b\x32%.sglang.grpc.scheduler.SamplingParams\x12\x16\n\x0ereturn_logprob\x18\x05 \x01(\x08\x12\x19\n\x11logprob_start_len\x18\x06 \x01(\x05\x12\x18\n\x10top_logprobs_num\x18\x07 \x01(\x05\x12\x19\n\x11token_ids_logprob\x18\x08 \x03(\r\x12\x1c\n\x14return_hidden_states\x18\t \x01(\x08\x12H\n\x14\x64isaggregated_params\x18\n \x01(\x0b\x32*.sglang.grpc.scheduler.DisaggregatedParams\x12\x1e\n\x16\x63ustom_logit_processor\x18\x0b \x01(\t\x12-\n\ttimestamp\x18\x0c \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x13\n\x0blog_metrics\x18\r \x01(\x08\x12\x14\n\x0cinput_embeds\x18\x0e \x03(\x02\x12\x0f\n\x07lora_id\x18\x0f \x01(\t\x12\x1a\n\x12\x64\x61ta_parallel_rank\x18\x10 \x01(\x05\x12\x0e\n\x06stream\x18\x11 \x01(\x08\":\n\x0eTokenizedInput\x12\x15\n\roriginal_text\x18\x01 \x01(\t\x12\x11\n\tinput_ids\x18\x02 \x03(\r\"\xd3\x01\n\x10MultimodalInputs\x12\x12\n\nimage_urls\x18\x01 \x03(\t\x12\x12\n\nvideo_urls\x18\x02 \x03(\t\x12\x12\n\naudio_urls\x18\x03 \x03(\t\x12\x33\n\x12processed_features\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\nimage_data\x18\x05 \x03(\x0c\x12\x12\n\nvideo_data\x18\x06 \x03(\x0c\x12\x12\n\naudio_data\x18\x07 \x03(\x0c\x12\x12\n\nmodalities\x18\x08 \x03(\t\"\xe3\x01\n\x10GenerateResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12;\n\x05\x63hunk\x18\x02 \x01(\x0b\x32*.sglang.grpc.scheduler.GenerateStreamChunkH\x00\x12;\n\x08\x63omplete\x18\x03 \x01(\x0b\x32\'.sglang.grpc.scheduler.GenerateCompleteH\x00\x12\x35\n\x05\x65rror\x18\x04 
\x01(\x0b\x32$.sglang.grpc.scheduler.GenerateErrorH\x00\x42\n\n\x08response\"\x95\x02\n\x13GenerateStreamChunk\x12\x11\n\ttoken_ids\x18\x01 \x03(\r\x12\x15\n\rprompt_tokens\x18\x02 \x01(\x05\x12\x19\n\x11\x63ompletion_tokens\x18\x03 \x01(\x05\x12\x15\n\rcached_tokens\x18\x04 \x01(\x05\x12>\n\x0foutput_logprobs\x18\x05 \x01(\x0b\x32%.sglang.grpc.scheduler.OutputLogProbs\x12\x15\n\rhidden_states\x18\x06 \x03(\x02\x12<\n\x0einput_logprobs\x18\x07 \x01(\x0b\x32$.sglang.grpc.scheduler.InputLogProbs\x12\r\n\x05index\x18\x08 \x01(\r\"\x9b\x03\n\x10GenerateComplete\x12\x12\n\noutput_ids\x18\x01 \x03(\r\x12\x15\n\rfinish_reason\x18\x02 \x01(\t\x12\x15\n\rprompt_tokens\x18\x03 \x01(\x05\x12\x19\n\x11\x63ompletion_tokens\x18\x04 \x01(\x05\x12\x15\n\rcached_tokens\x18\x05 \x01(\x05\x12>\n\x0foutput_logprobs\x18\x06 \x01(\x0b\x32%.sglang.grpc.scheduler.OutputLogProbs\x12>\n\x11\x61ll_hidden_states\x18\x07 \x03(\x0b\x32#.sglang.grpc.scheduler.HiddenStates\x12\x1a\n\x10matched_token_id\x18\x08 \x01(\rH\x00\x12\x1a\n\x10matched_stop_str\x18\t \x01(\tH\x00\x12<\n\x0einput_logprobs\x18\n \x01(\x0b\x32$.sglang.grpc.scheduler.InputLogProbs\x12\r\n\x05index\x18\x0b \x01(\rB\x0e\n\x0cmatched_stop\"K\n\rGenerateError\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x18\n\x10http_status_code\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"u\n\x0eOutputLogProbs\x12\x16\n\x0etoken_logprobs\x18\x01 \x03(\x02\x12\x11\n\ttoken_ids\x18\x02 \x03(\x05\x12\x38\n\x0ctop_logprobs\x18\x03 \x03(\x0b\x32\".sglang.grpc.scheduler.TopLogProbs\"\x9e\x01\n\rInputLogProbs\x12@\n\x0etoken_logprobs\x18\x01 \x03(\x0b\x32(.sglang.grpc.scheduler.InputTokenLogProb\x12\x11\n\ttoken_ids\x18\x02 \x03(\x05\x12\x38\n\x0ctop_logprobs\x18\x03 \x03(\x0b\x32\".sglang.grpc.scheduler.TopLogProbs\"1\n\x11InputTokenLogProb\x12\x12\n\x05value\x18\x01 \x01(\x02H\x00\x88\x01\x01\x42\x08\n\x06_value\"0\n\x0bTopLogProbs\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\x11\n\ttoken_ids\x18\x02 \x03(\x05\"?\n\x0cHiddenStates\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\r\n\x05layer\x18\x02 \x01(\x05\x12\x10\n\x08position\x18\x03 \x01(\x05\"\xca\x02\n\x0c\x45mbedRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x38\n\ttokenized\x18\x02 \x01(\x0b\x32%.sglang.grpc.scheduler.TokenizedInput\x12:\n\tmm_inputs\x18\x04 \x01(\x0b\x32\'.sglang.grpc.scheduler.MultimodalInputs\x12>\n\x0fsampling_params\x18\x05 \x01(\x0b\x32%.sglang.grpc.scheduler.SamplingParams\x12\x13\n\x0blog_metrics\x18\x06 \x01(\x08\x12\x16\n\x0etoken_type_ids\x18\x07 \x03(\x05\x12\x1a\n\x12\x64\x61ta_parallel_rank\x18\x08 \x01(\x05\x12\x18\n\x10is_cross_encoder\x18\t \x01(\x08\x12\r\n\x05texts\x18\n \x03(\t\"\x9d\x01\n\rEmbedResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x38\n\x08\x63omplete\x18\x02 \x01(\x0b\x32$.sglang.grpc.scheduler.EmbedCompleteH\x00\x12\x32\n\x05\x65rror\x18\x03 \x01(\x0b\x32!.sglang.grpc.scheduler.EmbedErrorH\x00\x42\n\n\x08response\"\xa3\x01\n\rEmbedComplete\x12\x11\n\tembedding\x18\x01 \x03(\x02\x12\x15\n\rprompt_tokens\x18\x02 \x01(\x05\x12\x15\n\rcached_tokens\x18\x03 \x01(\x05\x12\x15\n\rembedding_dim\x18\x04 \x01(\x05\x12:\n\x10\x62\x61tch_embeddings\x18\x05 \x03(\x0b\x32 .sglang.grpc.scheduler.Embedding\"*\n\tEmbedding\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\r\n\x05index\x18\x02 \x01(\x05\"<\n\nEmbedError\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\x14\n\x12HealthCheckRequest\"7\n\x13HealthCheckResponse\x12\x0f\n\x07healthy\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 
\x01(\t\"2\n\x0c\x41\x62ortRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\"1\n\rAbortResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"I\n\x0fLoadLoRARequest\x12\x12\n\nadapter_id\x18\x01 \x01(\t\x12\x14\n\x0c\x61\x64\x61pter_path\x18\x02 \x01(\t\x12\x0c\n\x04rank\x18\x03 \x01(\x05\"H\n\x10LoadLoRAResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x12\n\nadapter_id\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\"\'\n\x11UnloadLoRARequest\x12\x12\n\nadapter_id\x18\x01 \x01(\t\"6\n\x12UnloadLoRAResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"w\n\x14UpdateWeightsRequest\x12\x13\n\tdisk_path\x18\x01 \x01(\tH\x00\x12\x15\n\x0btensor_data\x18\x02 \x01(\x0cH\x00\x12\x14\n\nremote_url\x18\x03 \x01(\tH\x00\x12\x13\n\x0bweight_name\x18\x04 \x01(\tB\x08\n\x06source\"9\n\x15UpdateWeightsResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"-\n\x17GetInternalStateRequest\x12\x12\n\nstate_keys\x18\x01 \x03(\t\"B\n\x18GetInternalStateResponse\x12&\n\x05state\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"A\n\x17SetInternalStateRequest\x12&\n\x05state\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"<\n\x18SetInternalStateResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x15\n\x13GetModelInfoRequest\"\xea\x02\n\x14GetModelInfoResponse\x12\x12\n\nmodel_path\x18\x01 \x01(\t\x12\x16\n\x0etokenizer_path\x18\x02 \x01(\t\x12\x15\n\ris_generation\x18\x03 \x01(\x08\x12!\n\x19preferred_sampling_params\x18\x04 \x01(\t\x12\x16\n\x0eweight_version\x18\x05 \x01(\t\x12\x19\n\x11served_model_name\x18\x06 \x01(\t\x12\x1a\n\x12max_context_length\x18\x07 \x01(\x05\x12\x12\n\nvocab_size\x18\x08 \x01(\x05\x12\x17\n\x0fsupports_vision\x18\t \x01(\x08\x12\x12\n\nmodel_type\x18\n \x01(\t\x12\x15\n\reos_token_ids\x18\x0b \x03(\x05\x12\x14\n\x0cpad_token_id\x18\x0c \x01(\x05\x12\x14\n\x0c\x62os_token_id\x18\r \x01(\x05\x12\x19\n\x11max_req_input_len\x18\x0e \x01(\x05\"\x16\n\x14GetServerInfoRequest\"\xb7\x02\n\x15GetServerInfoResponse\x12,\n\x0bserver_args\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12/\n\x0escheduler_info\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x17\n\x0f\x61\x63tive_requests\x18\x03 \x01(\x05\x12\x11\n\tis_paused\x18\x04 \x01(\x08\x12\x1e\n\x16last_receive_timestamp\x18\x05 \x01(\x01\x12\x16\n\x0euptime_seconds\x18\x06 \x01(\x01\x12\x16\n\x0esglang_version\x18\x07 \x01(\t\x12\x13\n\x0bserver_type\x18\x08 \x01(\t\x12.\n\nstart_time\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xd3\x04\n\x0fSglangScheduler\x12]\n\x08Generate\x12&.sglang.grpc.scheduler.GenerateRequest\x1a\'.sglang.grpc.scheduler.GenerateResponse0\x01\x12R\n\x05\x45mbed\x12#.sglang.grpc.scheduler.EmbedRequest\x1a$.sglang.grpc.scheduler.EmbedResponse\x12\x64\n\x0bHealthCheck\x12).sglang.grpc.scheduler.HealthCheckRequest\x1a*.sglang.grpc.scheduler.HealthCheckResponse\x12R\n\x05\x41\x62ort\x12#.sglang.grpc.scheduler.AbortRequest\x1a$.sglang.grpc.scheduler.AbortResponse\x12g\n\x0cGetModelInfo\x12*.sglang.grpc.scheduler.GetModelInfoRequest\x1a+.sglang.grpc.scheduler.GetModelInfoResponse\x12j\n\rGetServerInfo\x12+.sglang.grpc.scheduler.GetServerInfoRequest\x1a,.sglang.grpc.scheduler.GetServerInfoResponseb\x06proto3')
 
  _globals = globals()
  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -39,73 +39,81 @@ if not _descriptor._USE_C_DESCRIPTORS:
  _globals['_SAMPLINGPARAMS_LOGITBIASENTRY']._loaded_options = None
  _globals['_SAMPLINGPARAMS_LOGITBIASENTRY']._serialized_options = b'8\001'
  _globals['_SAMPLINGPARAMS']._serialized_start=113
- _globals['_SAMPLINGPARAMS']._serialized_end=850
- _globals['_SAMPLINGPARAMS_LOGITBIASENTRY']._serialized_start=769
- _globals['_SAMPLINGPARAMS_LOGITBIASENTRY']._serialized_end=817
- _globals['_DISAGGREGATEDPARAMS']._serialized_start=852
- _globals['_DISAGGREGATEDPARAMS']._serialized_end=945
- _globals['_GENERATEREQUEST']._serialized_start=948
- _globals['_GENERATEREQUEST']._serialized_end=1558
- _globals['_TOKENIZEDINPUT']._serialized_start=1560
- _globals['_TOKENIZEDINPUT']._serialized_end=1618
- _globals['_MULTIMODALINPUTS']._serialized_start=1621
- _globals['_MULTIMODALINPUTS']._serialized_end=1832
- _globals['_GENERATERESPONSE']._serialized_start=1835
- _globals['_GENERATERESPONSE']._serialized_end=2062
- _globals['_GENERATESTREAMCHUNK']._serialized_start=2065
- _globals['_GENERATESTREAMCHUNK']._serialized_end=2342
- _globals['_GENERATECOMPLETE']._serialized_start=2345
- _globals['_GENERATECOMPLETE']._serialized_end=2756
- _globals['_GENERATEERROR']._serialized_start=2758
- _globals['_GENERATEERROR']._serialized_end=2833
- _globals['_OUTPUTLOGPROBS']._serialized_start=2835
- _globals['_OUTPUTLOGPROBS']._serialized_end=2952
- _globals['_INPUTLOGPROBS']._serialized_start=2955
- _globals['_INPUTLOGPROBS']._serialized_end=3113
- _globals['_INPUTTOKENLOGPROB']._serialized_start=3115
- _globals['_INPUTTOKENLOGPROB']._serialized_end=3164
- _globals['_TOPLOGPROBS']._serialized_start=3166
- _globals['_TOPLOGPROBS']._serialized_end=3214
- _globals['_HIDDENSTATES']._serialized_start=3216
- _globals['_HIDDENSTATES']._serialized_end=3279
- _globals['_EMBEDREQUEST']._serialized_start=3282
- _globals['_EMBEDREQUEST']._serialized_end=3612
- _globals['_EMBEDRESPONSE']._serialized_start=3615
- _globals['_EMBEDRESPONSE']._serialized_end=3772
- _globals['_EMBEDCOMPLETE']._serialized_start=3775
- _globals['_EMBEDCOMPLETE']._serialized_end=3938
- _globals['_EMBEDDING']._serialized_start=3940
- _globals['_EMBEDDING']._serialized_end=3982
- _globals['_EMBEDERROR']._serialized_start=3984
- _globals['_EMBEDERROR']._serialized_end=4044
- _globals['_HEALTHCHECKREQUEST']._serialized_start=4046
- _globals['_HEALTHCHECKREQUEST']._serialized_end=4124
- _globals['_HEALTHCHECKRESPONSE']._serialized_start=4126
- _globals['_HEALTHCHECKRESPONSE']._serialized_end=4181
- _globals['_ABORTREQUEST']._serialized_start=4183
- _globals['_ABORTREQUEST']._serialized_end=4233
- _globals['_ABORTRESPONSE']._serialized_start=4235
- _globals['_ABORTRESPONSE']._serialized_end=4284
- _globals['_LOADLORAREQUEST']._serialized_start=4286
- _globals['_LOADLORAREQUEST']._serialized_end=4359
- _globals['_LOADLORARESPONSE']._serialized_start=4361
- _globals['_LOADLORARESPONSE']._serialized_end=4433
- _globals['_UNLOADLORAREQUEST']._serialized_start=4435
- _globals['_UNLOADLORAREQUEST']._serialized_end=4474
- _globals['_UNLOADLORARESPONSE']._serialized_start=4476
- _globals['_UNLOADLORARESPONSE']._serialized_end=4530
- _globals['_UPDATEWEIGHTSREQUEST']._serialized_start=4532
- _globals['_UPDATEWEIGHTSREQUEST']._serialized_end=4651
- _globals['_UPDATEWEIGHTSRESPONSE']._serialized_start=4653
- _globals['_UPDATEWEIGHTSRESPONSE']._serialized_end=4710
- _globals['_GETINTERNALSTATEREQUEST']._serialized_start=4712
- _globals['_GETINTERNALSTATEREQUEST']._serialized_end=4757
- _globals['_GETINTERNALSTATERESPONSE']._serialized_start=4759
- _globals['_GETINTERNALSTATERESPONSE']._serialized_end=4825
- _globals['_SETINTERNALSTATEREQUEST']._serialized_start=4827
- _globals['_SETINTERNALSTATEREQUEST']._serialized_end=4892
- _globals['_SETINTERNALSTATERESPONSE']._serialized_start=4894
- _globals['_SETINTERNALSTATERESPONSE']._serialized_end=4954
- _globals['_SGLANGSCHEDULER']._serialized_start=4957
- _globals['_SGLANGSCHEDULER']._serialized_end=5339
+ _globals['_SAMPLINGPARAMS']._serialized_end=833
+ _globals['_SAMPLINGPARAMS_LOGITBIASENTRY']._serialized_start=732
+ _globals['_SAMPLINGPARAMS_LOGITBIASENTRY']._serialized_end=780
+ _globals['_DISAGGREGATEDPARAMS']._serialized_start=835
+ _globals['_DISAGGREGATEDPARAMS']._serialized_end=928
+ _globals['_GENERATEREQUEST']._serialized_start=931
+ _globals['_GENERATEREQUEST']._serialized_end=1541
+ _globals['_TOKENIZEDINPUT']._serialized_start=1543
+ _globals['_TOKENIZEDINPUT']._serialized_end=1601
+ _globals['_MULTIMODALINPUTS']._serialized_start=1604
+ _globals['_MULTIMODALINPUTS']._serialized_end=1815
+ _globals['_GENERATERESPONSE']._serialized_start=1818
+ _globals['_GENERATERESPONSE']._serialized_end=2045
+ _globals['_GENERATESTREAMCHUNK']._serialized_start=2048
+ _globals['_GENERATESTREAMCHUNK']._serialized_end=2325
+ _globals['_GENERATECOMPLETE']._serialized_start=2328
+ _globals['_GENERATECOMPLETE']._serialized_end=2739
+ _globals['_GENERATEERROR']._serialized_start=2741
+ _globals['_GENERATEERROR']._serialized_end=2816
+ _globals['_OUTPUTLOGPROBS']._serialized_start=2818
+ _globals['_OUTPUTLOGPROBS']._serialized_end=2935
+ _globals['_INPUTLOGPROBS']._serialized_start=2938
+ _globals['_INPUTLOGPROBS']._serialized_end=3096
+ _globals['_INPUTTOKENLOGPROB']._serialized_start=3098
+ _globals['_INPUTTOKENLOGPROB']._serialized_end=3147
+ _globals['_TOPLOGPROBS']._serialized_start=3149
+ _globals['_TOPLOGPROBS']._serialized_end=3197
+ _globals['_HIDDENSTATES']._serialized_start=3199
+ _globals['_HIDDENSTATES']._serialized_end=3262
+ _globals['_EMBEDREQUEST']._serialized_start=3265
+ _globals['_EMBEDREQUEST']._serialized_end=3595
+ _globals['_EMBEDRESPONSE']._serialized_start=3598
+ _globals['_EMBEDRESPONSE']._serialized_end=3755
+ _globals['_EMBEDCOMPLETE']._serialized_start=3758
+ _globals['_EMBEDCOMPLETE']._serialized_end=3921
+ _globals['_EMBEDDING']._serialized_start=3923
+ _globals['_EMBEDDING']._serialized_end=3965
+ _globals['_EMBEDERROR']._serialized_start=3967
+ _globals['_EMBEDERROR']._serialized_end=4027
+ _globals['_HEALTHCHECKREQUEST']._serialized_start=4029
+ _globals['_HEALTHCHECKREQUEST']._serialized_end=4049
+ _globals['_HEALTHCHECKRESPONSE']._serialized_start=4051
+ _globals['_HEALTHCHECKRESPONSE']._serialized_end=4106
+ _globals['_ABORTREQUEST']._serialized_start=4108
+ _globals['_ABORTREQUEST']._serialized_end=4158
+ _globals['_ABORTRESPONSE']._serialized_start=4160
+ _globals['_ABORTRESPONSE']._serialized_end=4209
+ _globals['_LOADLORAREQUEST']._serialized_start=4211
+ _globals['_LOADLORAREQUEST']._serialized_end=4284
+ _globals['_LOADLORARESPONSE']._serialized_start=4286
+ _globals['_LOADLORARESPONSE']._serialized_end=4358
+ _globals['_UNLOADLORAREQUEST']._serialized_start=4360
+ _globals['_UNLOADLORAREQUEST']._serialized_end=4399
+ _globals['_UNLOADLORARESPONSE']._serialized_start=4401
+ _globals['_UNLOADLORARESPONSE']._serialized_end=4455
+ _globals['_UPDATEWEIGHTSREQUEST']._serialized_start=4457
+ _globals['_UPDATEWEIGHTSREQUEST']._serialized_end=4576
+ _globals['_UPDATEWEIGHTSRESPONSE']._serialized_start=4578
+ _globals['_UPDATEWEIGHTSRESPONSE']._serialized_end=4635
+ _globals['_GETINTERNALSTATEREQUEST']._serialized_start=4637
+ _globals['_GETINTERNALSTATEREQUEST']._serialized_end=4682
+ _globals['_GETINTERNALSTATERESPONSE']._serialized_start=4684
+ _globals['_GETINTERNALSTATERESPONSE']._serialized_end=4750
+ _globals['_SETINTERNALSTATEREQUEST']._serialized_start=4752
+ _globals['_SETINTERNALSTATEREQUEST']._serialized_end=4817
+ _globals['_SETINTERNALSTATERESPONSE']._serialized_start=4819
+ _globals['_SETINTERNALSTATERESPONSE']._serialized_end=4879
+ _globals['_GETMODELINFOREQUEST']._serialized_start=4881
+ _globals['_GETMODELINFOREQUEST']._serialized_end=4902
+ _globals['_GETMODELINFORESPONSE']._serialized_start=4905
+ _globals['_GETMODELINFORESPONSE']._serialized_end=5267
+ _globals['_GETSERVERINFOREQUEST']._serialized_start=5269
+ _globals['_GETSERVERINFOREQUEST']._serialized_end=5291
+ _globals['_GETSERVERINFORESPONSE']._serialized_start=5294
+ _globals['_GETSERVERINFORESPONSE']._serialized_end=5605
+ _globals['_SGLANGSCHEDULER']._serialized_start=5608
+ _globals['_SGLANGSCHEDULER']._serialized_end=6203
  # @@protoc_insertion_point(module_scope)
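The regenerated descriptor above encodes a SamplingParams schema change: the lora_path and token_healing fields are removed, and stream_interval joins max_new_tokens as a proto3 optional field with explicit presence tracking. Below is a minimal sketch of constructing the message against the 0.5.4 stubs; it assumes the generated module is importable as sglang.srt.grpc.sglang_scheduler_pb2 (matching the file path listed above), and the parameter values are purely illustrative.

    from sglang.srt.grpc import sglang_scheduler_pb2 as pb2

    # Unchanged fields work as before; passing the removed lora_path or
    # token_healing keywords would now raise ValueError.
    params = pb2.SamplingParams(
        temperature=0.7,
        top_p=0.9,
        max_new_tokens=128,
        stop=["</s>"],
    )

    # stream_interval is now declared optional, so presence is tracked.
    assert not params.HasField("stream_interval")
    params.stream_interval = 4
    assert params.HasField("stream_interval")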
@@ -11,7 +11,7 @@ from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union
  DESCRIPTOR: _descriptor.FileDescriptor
 
  class SamplingParams(_message.Message):
- __slots__ = ("temperature", "top_p", "top_k", "min_p", "frequency_penalty", "presence_penalty", "repetition_penalty", "max_new_tokens", "stop", "stop_token_ids", "skip_special_tokens", "spaces_between_special_tokens", "regex", "json_schema", "ebnf_grammar", "structural_tag", "lora_path", "n", "token_healing", "min_new_tokens", "ignore_eos", "no_stop_trim", "stream_interval", "logit_bias", "custom_params")
+ __slots__ = ("temperature", "top_p", "top_k", "min_p", "frequency_penalty", "presence_penalty", "repetition_penalty", "max_new_tokens", "stop", "stop_token_ids", "skip_special_tokens", "spaces_between_special_tokens", "regex", "json_schema", "ebnf_grammar", "structural_tag", "n", "min_new_tokens", "ignore_eos", "no_stop_trim", "stream_interval", "logit_bias", "custom_params")
  class LogitBiasEntry(_message.Message):
  __slots__ = ("key", "value")
  KEY_FIELD_NUMBER: _ClassVar[int]
@@ -35,9 +35,7 @@ class SamplingParams(_message.Message):
  JSON_SCHEMA_FIELD_NUMBER: _ClassVar[int]
  EBNF_GRAMMAR_FIELD_NUMBER: _ClassVar[int]
  STRUCTURAL_TAG_FIELD_NUMBER: _ClassVar[int]
- LORA_PATH_FIELD_NUMBER: _ClassVar[int]
  N_FIELD_NUMBER: _ClassVar[int]
- TOKEN_HEALING_FIELD_NUMBER: _ClassVar[int]
  MIN_NEW_TOKENS_FIELD_NUMBER: _ClassVar[int]
  IGNORE_EOS_FIELD_NUMBER: _ClassVar[int]
  NO_STOP_TRIM_FIELD_NUMBER: _ClassVar[int]
@@ -60,16 +58,14 @@
  json_schema: str
  ebnf_grammar: str
  structural_tag: str
- lora_path: str
  n: int
- token_healing: bool
  min_new_tokens: int
  ignore_eos: bool
  no_stop_trim: bool
  stream_interval: int
  logit_bias: _containers.ScalarMap[str, float]
  custom_params: _struct_pb2.Struct
- def __init__(self, temperature: _Optional[float] = ..., top_p: _Optional[float] = ..., top_k: _Optional[int] = ..., min_p: _Optional[float] = ..., frequency_penalty: _Optional[float] = ..., presence_penalty: _Optional[float] = ..., repetition_penalty: _Optional[float] = ..., max_new_tokens: _Optional[int] = ..., stop: _Optional[_Iterable[str]] = ..., stop_token_ids: _Optional[_Iterable[int]] = ..., skip_special_tokens: bool = ..., spaces_between_special_tokens: bool = ..., regex: _Optional[str] = ..., json_schema: _Optional[str] = ..., ebnf_grammar: _Optional[str] = ..., structural_tag: _Optional[str] = ..., lora_path: _Optional[str] = ..., n: _Optional[int] = ..., token_healing: bool = ..., min_new_tokens: _Optional[int] = ..., ignore_eos: bool = ..., no_stop_trim: bool = ..., stream_interval: _Optional[int] = ..., logit_bias: _Optional[_Mapping[str, float]] = ..., custom_params: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ...) -> None: ...
+ def __init__(self, temperature: _Optional[float] = ..., top_p: _Optional[float] = ..., top_k: _Optional[int] = ..., min_p: _Optional[float] = ..., frequency_penalty: _Optional[float] = ..., presence_penalty: _Optional[float] = ..., repetition_penalty: _Optional[float] = ..., max_new_tokens: _Optional[int] = ..., stop: _Optional[_Iterable[str]] = ..., stop_token_ids: _Optional[_Iterable[int]] = ..., skip_special_tokens: bool = ..., spaces_between_special_tokens: bool = ..., regex: _Optional[str] = ..., json_schema: _Optional[str] = ..., ebnf_grammar: _Optional[str] = ..., structural_tag: _Optional[str] = ..., n: _Optional[int] = ..., min_new_tokens: _Optional[int] = ..., ignore_eos: bool = ..., no_stop_trim: bool = ..., stream_interval: _Optional[int] = ..., logit_bias: _Optional[_Mapping[str, float]] = ..., custom_params: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ...) -> None: ...
73
69
 
74
70
  class DisaggregatedParams(_message.Message):
75
71
  __slots__ = ("bootstrap_host", "bootstrap_port", "bootstrap_room")
@@ -324,10 +320,8 @@ class EmbedError(_message.Message):
324
320
  def __init__(self, message: _Optional[str] = ..., code: _Optional[str] = ..., details: _Optional[str] = ...) -> None: ...
325
321
 
326
322
  class HealthCheckRequest(_message.Message):
327
- __slots__ = ("tokenized",)
328
- TOKENIZED_FIELD_NUMBER: _ClassVar[int]
329
- tokenized: TokenizedInput
330
- def __init__(self, tokenized: _Optional[_Union[TokenizedInput, _Mapping]] = ...) -> None: ...
323
+ __slots__ = ()
324
+ def __init__(self) -> None: ...
331
325
 
332
326
  class HealthCheckResponse(_message.Message):
333
327
  __slots__ = ("healthy", "message")
@@ -432,3 +426,65 @@ class SetInternalStateResponse(_message.Message):
  success: bool
  message: str
  def __init__(self, success: bool = ..., message: _Optional[str] = ...) -> None: ...
+
+ class GetModelInfoRequest(_message.Message):
+ __slots__ = ()
+ def __init__(self) -> None: ...
+
+ class GetModelInfoResponse(_message.Message):
+ __slots__ = ("model_path", "tokenizer_path", "is_generation", "preferred_sampling_params", "weight_version", "served_model_name", "max_context_length", "vocab_size", "supports_vision", "model_type", "eos_token_ids", "pad_token_id", "bos_token_id", "max_req_input_len")
+ MODEL_PATH_FIELD_NUMBER: _ClassVar[int]
+ TOKENIZER_PATH_FIELD_NUMBER: _ClassVar[int]
+ IS_GENERATION_FIELD_NUMBER: _ClassVar[int]
+ PREFERRED_SAMPLING_PARAMS_FIELD_NUMBER: _ClassVar[int]
+ WEIGHT_VERSION_FIELD_NUMBER: _ClassVar[int]
+ SERVED_MODEL_NAME_FIELD_NUMBER: _ClassVar[int]
+ MAX_CONTEXT_LENGTH_FIELD_NUMBER: _ClassVar[int]
+ VOCAB_SIZE_FIELD_NUMBER: _ClassVar[int]
+ SUPPORTS_VISION_FIELD_NUMBER: _ClassVar[int]
+ MODEL_TYPE_FIELD_NUMBER: _ClassVar[int]
+ EOS_TOKEN_IDS_FIELD_NUMBER: _ClassVar[int]
+ PAD_TOKEN_ID_FIELD_NUMBER: _ClassVar[int]
+ BOS_TOKEN_ID_FIELD_NUMBER: _ClassVar[int]
+ MAX_REQ_INPUT_LEN_FIELD_NUMBER: _ClassVar[int]
+ model_path: str
+ tokenizer_path: str
+ is_generation: bool
+ preferred_sampling_params: str
+ weight_version: str
+ served_model_name: str
+ max_context_length: int
+ vocab_size: int
+ supports_vision: bool
+ model_type: str
+ eos_token_ids: _containers.RepeatedScalarFieldContainer[int]
+ pad_token_id: int
+ bos_token_id: int
+ max_req_input_len: int
+ def __init__(self, model_path: _Optional[str] = ..., tokenizer_path: _Optional[str] = ..., is_generation: bool = ..., preferred_sampling_params: _Optional[str] = ..., weight_version: _Optional[str] = ..., served_model_name: _Optional[str] = ..., max_context_length: _Optional[int] = ..., vocab_size: _Optional[int] = ..., supports_vision: bool = ..., model_type: _Optional[str] = ..., eos_token_ids: _Optional[_Iterable[int]] = ..., pad_token_id: _Optional[int] = ..., bos_token_id: _Optional[int] = ..., max_req_input_len: _Optional[int] = ...) -> None: ...
+
+ class GetServerInfoRequest(_message.Message):
+ __slots__ = ()
+ def __init__(self) -> None: ...
+
+ class GetServerInfoResponse(_message.Message):
+ __slots__ = ("server_args", "scheduler_info", "active_requests", "is_paused", "last_receive_timestamp", "uptime_seconds", "sglang_version", "server_type", "start_time")
+ SERVER_ARGS_FIELD_NUMBER: _ClassVar[int]
+ SCHEDULER_INFO_FIELD_NUMBER: _ClassVar[int]
+ ACTIVE_REQUESTS_FIELD_NUMBER: _ClassVar[int]
+ IS_PAUSED_FIELD_NUMBER: _ClassVar[int]
+ LAST_RECEIVE_TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
+ UPTIME_SECONDS_FIELD_NUMBER: _ClassVar[int]
+ SGLANG_VERSION_FIELD_NUMBER: _ClassVar[int]
+ SERVER_TYPE_FIELD_NUMBER: _ClassVar[int]
+ START_TIME_FIELD_NUMBER: _ClassVar[int]
+ server_args: _struct_pb2.Struct
+ scheduler_info: _struct_pb2.Struct
+ active_requests: int
+ is_paused: bool
+ last_receive_timestamp: float
+ uptime_seconds: float
+ sglang_version: str
+ server_type: str
+ start_time: _timestamp_pb2.Timestamp
+ def __init__(self, server_args: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., scheduler_info: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., active_requests: _Optional[int] = ..., is_paused: bool = ..., last_receive_timestamp: _Optional[float] = ..., uptime_seconds: _Optional[float] = ..., sglang_version: _Optional[str] = ..., server_type: _Optional[str] = ..., start_time: _Optional[_Union[datetime.datetime, _timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ...
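The new GetModelInfo / GetServerInfo messages follow the usual protobuf convention: an empty request, with all metadata carried in the response. A minimal sketch of constructing and reading them, assuming the generated module is importable as sglang_scheduler_pb2 (the exact package path is not shown in this diff); the field values are placeholders:

    from sglang_scheduler_pb2 import GetModelInfoRequest, GetModelInfoResponse

    # The request message has no fields; all metadata lives in the response.
    request = GetModelInfoRequest()

    # Placeholder values, set by hand purely for illustration.
    info = GetModelInfoResponse(
        model_path="/models/example",
        is_generation=True,
        max_context_length=32768,
        eos_token_ids=[2],
    )
    print(info.model_path, info.max_context_length)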
@@ -8,7 +8,7 @@ import warnings

  from . import sglang_scheduler_pb2 as sglang__scheduler__pb2

- GRPC_GENERATED_VERSION = '1.74.0'
+ GRPC_GENERATED_VERSION = '1.75.1'
  GRPC_VERSION = grpc.__version__
  _version_not_supported = False

@@ -59,6 +59,16 @@ class SglangSchedulerStub(object):
  request_serializer=sglang__scheduler__pb2.AbortRequest.SerializeToString,
  response_deserializer=sglang__scheduler__pb2.AbortResponse.FromString,
  _registered_method=True)
+ self.GetModelInfo = channel.unary_unary(
+ '/sglang.grpc.scheduler.SglangScheduler/GetModelInfo',
+ request_serializer=sglang__scheduler__pb2.GetModelInfoRequest.SerializeToString,
+ response_deserializer=sglang__scheduler__pb2.GetModelInfoResponse.FromString,
+ _registered_method=True)
+ self.GetServerInfo = channel.unary_unary(
+ '/sglang.grpc.scheduler.SglangScheduler/GetServerInfo',
+ request_serializer=sglang__scheduler__pb2.GetServerInfoRequest.SerializeToString,
+ response_deserializer=sglang__scheduler__pb2.GetServerInfoResponse.FromString,
+ _registered_method=True)


  class SglangSchedulerServicer(object):
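With the stub methods registered above, calling the new unary RPCs from a client is one line each. A minimal sketch, assuming the scheduler is reachable on localhost:30000 and that the generated modules import as sglang_scheduler_pb2 / sglang_scheduler_pb2_grpc (address and import paths are assumptions, not taken from this diff):

    import grpc
    from sglang_scheduler_pb2 import GetModelInfoRequest, GetServerInfoRequest
    from sglang_scheduler_pb2_grpc import SglangSchedulerStub

    # Open a channel to the scheduler and issue the two new unary calls.
    with grpc.insecure_channel("localhost:30000") as channel:
        stub = SglangSchedulerStub(channel)
        model_info = stub.GetModelInfo(GetModelInfoRequest())
        server_info = stub.GetServerInfo(GetServerInfoRequest())
        print(model_info.served_model_name, model_info.max_context_length)
        print(server_info.sglang_version, server_info.active_requests)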
@@ -94,6 +104,20 @@ class SglangSchedulerServicer(object):
  context.set_details('Method not implemented!')
  raise NotImplementedError('Method not implemented!')

+ def GetModelInfo(self, request, context):
+ """Get model information
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetServerInfo(self, request, context):
+ """Get server information
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+

  def add_SglangSchedulerServicer_to_server(servicer, server):
  rpc_method_handlers = {
@@ -117,6 +141,16 @@ def add_SglangSchedulerServicer_to_server(servicer, server):
  request_deserializer=sglang__scheduler__pb2.AbortRequest.FromString,
  response_serializer=sglang__scheduler__pb2.AbortResponse.SerializeToString,
  ),
+ 'GetModelInfo': grpc.unary_unary_rpc_method_handler(
+ servicer.GetModelInfo,
+ request_deserializer=sglang__scheduler__pb2.GetModelInfoRequest.FromString,
+ response_serializer=sglang__scheduler__pb2.GetModelInfoResponse.SerializeToString,
+ ),
+ 'GetServerInfo': grpc.unary_unary_rpc_method_handler(
+ servicer.GetServerInfo,
+ request_deserializer=sglang__scheduler__pb2.GetServerInfoRequest.FromString,
+ response_serializer=sglang__scheduler__pb2.GetServerInfoResponse.SerializeToString,
+ ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
  'sglang.grpc.scheduler.SglangScheduler', rpc_method_handlers)
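The base servicer above raises UNIMPLEMENTED for the new methods, so a concrete scheduler overrides them and then wires itself in with add_SglangSchedulerServicer_to_server. A minimal server-side sketch under the same import-path assumptions as above; the response values are placeholders, not sglang's real implementation:

    from concurrent import futures

    import grpc
    from sglang_scheduler_pb2 import GetModelInfoResponse, GetServerInfoResponse
    from sglang_scheduler_pb2_grpc import (
        SglangSchedulerServicer,
        add_SglangSchedulerServicer_to_server,
    )

    class SchedulerService(SglangSchedulerServicer):
        def GetModelInfo(self, request, context):
            # Placeholder values; a real servicer reads these from the runtime.
            return GetModelInfoResponse(model_path="/models/example", is_generation=True)

        def GetServerInfo(self, request, context):
            return GetServerInfoResponse(active_requests=0, is_paused=False)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_SglangSchedulerServicer_to_server(SchedulerService(), server)
    server.add_insecure_port("[::]:30000")  # port is an assumption
    server.start()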
@@ -237,3 +271,57 @@ class SglangScheduler(object):
  timeout,
  metadata,
  _registered_method=True)
+
+ @staticmethod
+ def GetModelInfo(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/sglang.grpc.scheduler.SglangScheduler/GetModelInfo',
+ sglang__scheduler__pb2.GetModelInfoRequest.SerializeToString,
+ sglang__scheduler__pb2.GetModelInfoResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def GetServerInfo(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/sglang.grpc.scheduler.SglangScheduler/GetServerInfo',
+ sglang__scheduler__pb2.GetServerInfoRequest.SerializeToString,
+ sglang__scheduler__pb2.GetServerInfoResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@@ -380,4 +380,7 @@ if not (
  logger.info(
  "sgl-kernel is not available on Non-NV, Non-AMD platforms or Non-AMX CPUs. Fallback to other kernel libraries."
  )
- from vllm.model_executor.layers.activation import GeluAndMul, SiluAndMul
+ from vllm.model_executor.layers.activation import ( # noqa: F401
+ GeluAndMul,
+ SiluAndMul,
+ )
@@ -1064,7 +1064,7 @@ class AiterMultiStepDraftBackend:
  device=model_runner.device,
  )
  self.attn_backends = []
- for i in range(self.speculative_num_steps):
+ for i in range(self.speculative_num_steps - 1):
  self.attn_backends.append(
  AiterAttnBackend(
  model_runner,
@@ -1107,7 +1107,7 @@ class AiterMultiStepDraftBackend:
  self.page_size,
  )

- for i in range(self.speculative_num_steps):
+ for i in range(self.speculative_num_steps - 1):
  forward_batch.spec_info.kv_indptr = self.kv_indptr[i, : bs + 1]
  forward_batch.spec_info.kv_indices = kv_indices_buffer[i][
  : seq_lens_sum * self.topk + bs * (i + 1)
@@ -1141,7 +1141,7 @@ class AiterMultiStepDraftBackend:
  dtype=torch.int32,
  device=self.device,
  )
- for i in range(self.speculative_num_steps):
+ for i in range(self.speculative_num_steps - 1):
  self.attn_backends[i].init_cuda_graph_state(
  max_bs, max_num_tokens, kv_indices_buf=self.cuda_graph_kv_indices[i]
  )
@@ -20,7 +20,6 @@ if TYPE_CHECKING:
  from sglang.srt.layers.radix_attention import RadixAttention
  from sglang.srt.model_executor.model_runner import ModelRunner

- import os

  import numpy as np

@@ -356,6 +355,11 @@ class AscendAttnBackend(AttentionBackend):
  assert (
  layer.qk_head_dim != layer.v_head_dim
  ), "FIA only supports qk_head_dim != v_head_dim"
+ num_token_padding = q.shape[0]
+ q, k, v = [
+ data[: forward_batch.num_token_non_padded_cpu] for data in [q, k, v]
+ ]
+
  q_nope, q_rope = q.split([layer.v_head_dim, self.qk_rope_head_dim], dim=-1)
  k_nope, k_rope = k.split([layer.v_head_dim, self.qk_rope_head_dim], dim=-1)

@@ -375,6 +379,18 @@ class AscendAttnBackend(AttentionBackend):
  next_tokens=0,
  )

+ attn_output = attn_output.reshape(-1, layer.tp_q_head_num, layer.v_head_dim)
+ if num_token_padding != forward_batch.num_token_non_padded_cpu:
+ attn_output = torch.cat(
+ [
+ attn_output,
+ attn_output.new_zeros(
+ num_token_padding - attn_output.shape[0],
+ *attn_output.shape[1:],
+ ),
+ ],
+ dim=0,
+ )
  return attn_output

  def forward_decode_graph(
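The Ascend change above follows a common unpad/re-pad pattern: compute attention only over the non-padded tokens, then zero-fill the output back to the padded length that downstream kernels expect. A standalone sketch of the same idea in PyTorch, with all names hypothetical:

    import torch

    def unpad_compute_repad(q, num_non_padded, compute_fn):
        # q: [num_padded_tokens, ...]; only the first num_non_padded rows are real.
        num_padded = q.shape[0]
        out = compute_fn(q[:num_non_padded])
        if num_padded != num_non_padded:
            # Re-pad with zeros so the output keeps the padded token count.
            out = torch.cat(
                [out, out.new_zeros(num_padded - out.shape[0], *out.shape[1:])],
                dim=0,
            )
        return out

    # Example: a dummy "attention" over 6 padded tokens, 4 of them real.
    q = torch.randn(6, 8)
    out = unpad_compute_repad(q, 4, lambda x: x * 2.0)
    assert out.shape == q.shape and torch.all(out[4:] == 0)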
@@ -1,7 +1,14 @@
  import logging
+ from typing import TYPE_CHECKING

  logger = logging.getLogger(__name__)

+
+ if TYPE_CHECKING:
+ # evade circular imports
+ from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
+ from sglang.srt.model_executor.model_runner import ModelRunner
+
  ATTENTION_BACKENDS = {}


@@ -27,7 +34,9 @@ def create_flashinfer_backend(runner):
  or not runner.plan_stream_for_flashinfer
  ):
  runner.plan_stream_for_flashinfer = torch.cuda.Stream()
- return FlashInferAttnBackend(runner)
+ return FlashInferAttnBackend(
+ runner, init_new_workspace=runner.init_new_workspace
+ )
  else:
  from sglang.srt.layers.attention.flashinfer_mla_backend import (
  FlashInferMLAAttnBackend,
@@ -129,9 +138,6 @@ def create_flashattention_v3_backend(runner):

  @register_attention_backend("fa4")
  def create_flashattention_v4_backend(runner):
- assert (
- runner.use_mla_backend
- ), "FlashAttention v4 Support is at an early stage, only MLA model supported now"
  from sglang.srt.layers.attention.flashattention_backend import FlashAttentionBackend

  return FlashAttentionBackend(runner, fa_impl_ver=4)
@@ -169,38 +175,52 @@ def create_dual_chunk_flash_attn_backend(runner):
  return DualChunkFlashAttentionBackend(runner)


- def attn_backend_wrapper(runner, full_attn_backend):
+ def attn_backend_wrapper(runner: "ModelRunner", full_attn_backend: "AttentionBackend"):
  """
  Wrapper for special models like hybrid GDN, so we don't
  need to change the code of the original attention backend.
  """
  assert not (
- runner.is_hybrid_gdn and runner.use_mla_backend
+ runner.hybrid_gdn_config is not None and runner.use_mla_backend
  ), "hybrid_gdn can only be used with non-MLA models."

- # wrap for hybrid GDN models
- if runner.is_hybrid_gdn:
- from sglang.srt.utils import is_blackwell, is_npu
-
- if is_blackwell():
- assert (
- runner.server_args.attention_backend == "triton"
- or runner.server_args.attention_backend == "trtllm_mha"
- ), "triton or trtllm_mha backend are the only supported backends on Blackwell GPUs for hybrid GDN models, use --attention-backend triton or --attention-backend trtllm_mha to specify the backend."
- if is_npu():
- assert (
- runner.server_args.attention_backend == "ascend"
- ), "ascend backend is the only supported backend on NPU for hybrid GDN models, use --attention-backend ascend to specify the backend."
- logger.info(f"Using hybrid linear attention backend for hybrid GDN models.")
+ if cfg := runner.mambaish_config:
+ from sglang.srt.layers.attention.fla.utils import check_environments
  from sglang.srt.layers.attention.hybrid_linear_attn_backend import (
+ GDNAttnBackend,
  HybridLinearAttnBackend,
- MambaAttnBackend,
+ Mamba2AttnBackend,
  )
+ from sglang.srt.utils import is_blackwell, is_npu

- linear_attn_backend = MambaAttnBackend(runner)
- full_attn_layers = runner.model_config.hf_config.full_attention_layer_ids
+ check_environments()
+ if runner.hybrid_gdn_config is not None:
+ if is_blackwell():
+ assert (
+ runner.server_args.attention_backend == "triton"
+ ), "triton backend is the only supported backend on Blackwell GPUs for hybrid GDN models, use --attention-backend triton to specify the backend."
+ if is_npu():
+ assert (
+ runner.server_args.attention_backend == "ascend"
+ ), "ascend backend is the only supported backend on NPU for hybrid GDN models, use --attention-backend ascend to specify the backend."
+ logger.info(f"Using hybrid linear attention backend for hybrid GDN models.")
+ linear_attn_backend = GDNAttnBackend(runner)
+ elif runner.mamba2_config is not None:
+ linear_attn_backend = Mamba2AttnBackend(runner)
+ else:
+ raise ValueError(
+ "Expected hybrid GDN or NemotronH models, but got unknown model."
+ )
+ full_attn_layers = cfg.full_attention_layer_ids
  return HybridLinearAttnBackend(
  full_attn_backend, linear_attn_backend, full_attn_layers
  )

  return full_attn_backend
+
+
+ @register_attention_backend("intel_xpu")
+ def create_intel_xpu_backend(runner):
+ from sglang.srt.layers.attention.xpu_backend import XPUAttentionBackend
+
+ return XPUAttentionBackend(runner)
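The new intel_xpu entry uses the same registry pattern as the other backends in this file: register_attention_backend stores a factory under the given name, and the factory receives the model runner when that backend is selected. A minimal sketch of registering a custom backend the same way; the registry's import path and the backend class are assumptions for illustration:

    # The module path of register_attention_backend is assumed here, not taken from this diff.
    from sglang.srt.layers.attention.attention_registry import register_attention_backend

    @register_attention_backend("my_custom_backend")
    def create_my_custom_backend(runner):
        # MyCustomAttnBackend is a hypothetical class implementing the AttentionBackend interface.
        from my_package.my_backend import MyCustomAttnBackend

        return MyCustomAttnBackend(runner)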