sglang 0.5.1.post2__py3-none-any.whl → 0.5.1.post3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. sglang/bench_one_batch_server.py +79 -53
  2. sglang/bench_serving.py +186 -14
  3. sglang/profiler.py +0 -1
  4. sglang/srt/conversation.py +38 -5
  5. sglang/srt/entrypoints/engine.py +1 -1
  6. sglang/srt/entrypoints/openai/protocol.py +27 -24
  7. sglang/srt/entrypoints/openai/serving_chat.py +50 -9
  8. sglang/srt/entrypoints/openai/serving_completions.py +15 -0
  9. sglang/srt/function_call/deepseekv31_detector.py +222 -0
  10. sglang/srt/function_call/function_call_parser.py +2 -0
  11. sglang/srt/function_call/gpt_oss_detector.py +144 -256
  12. sglang/srt/harmony_parser.py +588 -0
  13. sglang/srt/hf_transformers_utils.py +16 -7
  14. sglang/srt/layers/attention/ascend_backend.py +218 -111
  15. sglang/srt/layers/attention/flashattention_backend.py +241 -7
  16. sglang/srt/layers/attention/flashinfer_backend.py +5 -2
  17. sglang/srt/layers/attention/flashinfer_mla_backend.py +5 -2
  18. sglang/srt/layers/communicator.py +1 -2
  19. sglang/srt/layers/moe/cutlass_moe.py +0 -8
  20. sglang/srt/layers/moe/ep_moe/layer.py +1 -7
  21. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=257,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  22. sglang/srt/layers/moe/topk.py +1 -1
  23. sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py +133 -235
  24. sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +5 -7
  25. sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +5 -23
  26. sglang/srt/layers/quantization/fp8.py +2 -1
  27. sglang/srt/layers/quantization/fp8_kernel.py +2 -2
  28. sglang/srt/layers/quantization/fp8_utils.py +2 -2
  29. sglang/srt/layers/quantization/mxfp4.py +16 -23
  30. sglang/srt/layers/quantization/mxfp4_tensor.py +3 -1
  31. sglang/srt/layers/utils.py +0 -14
  32. sglang/srt/managers/cache_controller.py +223 -156
  33. sglang/srt/managers/detokenizer_manager.py +5 -0
  34. sglang/srt/managers/io_struct.py +30 -0
  35. sglang/srt/managers/scheduler.py +58 -7
  36. sglang/srt/managers/tokenizer_manager.py +36 -3
  37. sglang/srt/mem_cache/hicache_storage.py +31 -20
  38. sglang/srt/mem_cache/hiradix_cache.py +12 -3
  39. sglang/srt/mem_cache/memory_pool.py +73 -14
  40. sglang/srt/mem_cache/memory_pool_host.py +3 -2
  41. sglang/srt/mem_cache/radix_cache.py +1 -0
  42. sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +5 -13
  43. sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +85 -81
  44. sglang/srt/model_executor/model_runner.py +1 -1
  45. sglang/srt/models/deepseek_v2.py +12 -3
  46. sglang/srt/models/gpt_oss.py +2 -1
  47. sglang/srt/models/qwen2_5_vl.py +1 -0
  48. sglang/srt/reasoning_parser.py +56 -300
  49. sglang/srt/server_args.py +10 -1
  50. sglang/srt/tokenizer/tiktoken_tokenizer.py +6 -1
  51. sglang/srt/utils.py +59 -5
  52. sglang/version.py +1 -1
  53. {sglang-0.5.1.post2.dist-info → sglang-0.5.1.post3.dist-info}/METADATA +4 -3
  54. {sglang-0.5.1.post2.dist-info → sglang-0.5.1.post3.dist-info}/RECORD +57 -54
  55. {sglang-0.5.1.post2.dist-info → sglang-0.5.1.post3.dist-info}/WHEEL +0 -0
  56. {sglang-0.5.1.post2.dist-info → sglang-0.5.1.post3.dist-info}/licenses/LICENSE +0 -0
  57. {sglang-0.5.1.post2.dist-info → sglang-0.5.1.post3.dist-info}/top_level.txt +0 -0
sglang/srt/entrypoints/openai/serving_chat.py
@@ -148,6 +148,16 @@ class OpenAIServingChat(OpenAIServingBase):
         self, request: ChatCompletionRequest, is_multimodal: bool
     ) -> MessageProcessingResult:
         """Process chat messages and apply chat template"""
+        is_gpt_oss = (
+            hasattr(self.tokenizer_manager.model_config, "hf_config")
+            and hasattr(self.tokenizer_manager.model_config.hf_config, "model_type")
+            and self.tokenizer_manager.model_config.hf_config.model_type == "gpt_oss"
+        )
+
+        # GptOss model needs to keep special tokens for harmony parsing
+        if is_gpt_oss:
+            request.skip_special_tokens = False
+
         tool_call_constraint = None

         # Apply chat template and its stop strings
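Context for the hunk above: gpt-oss emits "harmony"-format output in which reasoning and final text live in separate channels delimited by special tokens; if detokenization strips them, the downstream parser cannot recover the channels. A toy illustration of that dependency (not sglang's actual parser; token layout per the published gpt-oss harmony format):

```python
# Toy sketch (NOT sglang's parser): why harmony parsing needs special tokens.
import re

raw = (
    "<|channel|>analysis<|message|>Let me think...<|end|>"
    "<|channel|>final<|message|>Hello!<|end|>"
)

def split_channels(text: str) -> dict:
    # Each channel is delimited by <|channel|>{name}<|message|>{body}<|end|>
    return {
        name: body
        for name, body in re.findall(
            r"<\|channel\|>(\w+)<\|message\|>(.*?)<\|end\|>", text, re.DOTALL
        )
    }

print(split_channels(raw))  # {'analysis': 'Let me think...', 'final': 'Hello!'}
# With skip_special_tokens=True the delimiters would be removed during
# detokenization and the analysis/final split would be unrecoverable,
# which is why the request is overridden for gpt_oss models.
```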
@@ -207,6 +217,25 @@ class OpenAIServingChat(OpenAIServingBase):
                 audio_data,
                 modalities,
             )
+
+            # Per the Transformers docs & maintainers, tool call arguments in
+            # assistant-role messages with tool_calls need to be dicts, not JSON
+            # strings; this is how tool-use chat templates will expect them going
+            # forward. So, for messages that have tool_calls, parse the string
+            # (which we get from the OpenAI format) into a dict.
+            if (
+                processed_msg["role"] == "assistant"
+                and "tool_calls" in processed_msg
+                and isinstance(processed_msg["tool_calls"], list)
+            ):
+                for item in processed_msg["tool_calls"]:
+                    if "arguments" in item["function"] and isinstance(
+                        item["function"]["arguments"], str
+                    ):
+                        item["function"]["arguments"] = json.loads(
+                            item["function"]["arguments"]
+                        )
+
             openai_compatible_messages.append(processed_msg)

         # Handle assistant prefix for continue_final_message
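To make the conversion concrete, a minimal before/after on a hand-written assistant message (field names follow the OpenAI chat format):

```python
import json

# Hand-written example message (not captured from a real request)
processed_msg = {
    "role": "assistant",
    "tool_calls": [
        {
            "id": "call_abc123",
            "type": "function",
            "function": {
                "name": "get_current_weather",
                # OpenAI clients serialize arguments as a JSON string...
                "arguments": '{"location": "Tokyo"}',
            },
        }
    ],
}

for item in processed_msg["tool_calls"]:
    if isinstance(item["function"]["arguments"], str):
        item["function"]["arguments"] = json.loads(item["function"]["arguments"])

# ...while tool-use chat templates expect a dict:
assert processed_msg["tool_calls"][0]["function"]["arguments"] == {"location": "Tokyo"}
```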
@@ -806,15 +835,23 @@ class OpenAIServingChat(OpenAIServingBase):
             finish_reason["matched"] = None
         try:
             text, call_info_list = parser.parse_non_stream(text)
-            tool_calls = [
-                ToolCall(
-                    id=f"call_{uuid.uuid4().hex[:24]}",
-                    function=FunctionResponse(
-                        name=call_info.name, arguments=call_info.parameters
-                    ),
+            tool_calls = []
+            for call_info in call_info_list:
+                # For Kimi-K2, align tool_call_id with the model format: functions.{name}:{index}
+                if tool_call_parser == "kimi_k2" and call_info.name is not None:
+                    tool_id = f"functions.{call_info.name}:{call_info.tool_index}"
+                else:
+                    tool_id = f"call_{uuid.uuid4().hex[:24]}"
+
+                tool_calls.append(
+                    ToolCall(
+                        id=tool_id,
+                        index=getattr(call_info, "tool_index", None),
+                        function=FunctionResponse(
+                            name=call_info.name, arguments=call_info.parameters
+                        ),
+                    )
                 )
-                for call_info in call_info_list
-            ]
             return tool_calls, text, finish_reason
         except Exception as e:
             logger.error(f"Tool call parsing error: {e}")
@@ -925,7 +962,11 @@ class OpenAIServingChat(OpenAIServingBase):
                 # Tool call ID should be generated only once per tool call
                 if call_item.name:
                     # First chunk: include ID and function name
-                    tool_call_id = f"call_{uuid.uuid4().hex[:24]}"
+                    if self.tokenizer_manager.server_args.tool_call_parser == "kimi_k2":
+                        # Align with Kimi-K2 format: functions.{name}:{index}
+                        tool_call_id = f"functions.{call_item.name}:{call_item.tool_index}"
+                    else:
+                        tool_call_id = f"call_{uuid.uuid4().hex[:24]}"
                     function_name = call_item.name
                 else:
                     # Subsequent chunks: null ID and name for argument deltas
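The streaming path mirrors the non-streaming change: only the first delta of a tool call carries the ID and function name, and later deltas stream argument fragments with both left null. Roughly what a kimi_k2 stream looks like (hand-written sketch, not captured server output):

```python
# Hand-written sketch of the delta sequence for one kimi_k2 tool call
chunks = [
    {"index": 0, "id": "functions.get_current_weather:0",
     "function": {"name": "get_current_weather", "arguments": ""}},
    {"index": 0, "id": None, "function": {"name": None, "arguments": '{"loc'}},
    {"index": 0, "id": None, "function": {"name": None, "arguments": 'ation": "Tokyo"}'}},
]

# Clients reassemble the arguments by concatenating the deltas in order
arguments = "".join(c["function"]["arguments"] or "" for c in chunks)
assert arguments == '{"location": "Tokyo"}'
```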
sglang/srt/entrypoints/openai/serving_completions.py
@@ -23,6 +23,7 @@ from sglang.srt.entrypoints.openai.utils import (
 from sglang.srt.managers.io_struct import GenerateReqInput
 from sglang.srt.managers.template_manager import TemplateManager
 from sglang.srt.managers.tokenizer_manager import TokenizerManager
+from sglang.utils import convert_json_schema_to_str

 logger = logging.getLogger(__name__)

@@ -125,6 +126,20 @@ class OpenAIServingCompletion(OpenAIServingBase):
             "logit_bias": request.logit_bias,
         }

+        # Handle response_format constraints
+        if request.response_format and request.response_format.type == "json_schema":
+            sampling_params["json_schema"] = convert_json_schema_to_str(
+                request.response_format.json_schema.schema_
+            )
+        elif request.response_format and request.response_format.type == "json_object":
+            sampling_params["json_schema"] = '{"type": "object"}'
+        elif (
+            request.response_format and request.response_format.type == "structural_tag"
+        ):
+            sampling_params["structural_tag"] = convert_json_schema_to_str(
+                request.response_format.model_dump(by_alias=True)
+            )
+
         return sampling_params

     async def _handle_streaming_request(
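Client-side, this means /v1/completions now honors the same `response_format` constraints as /v1/chat/completions. A hypothetical request (the URL assumes a default local sglang launch on port 30000; adjust to your deployment):

```python
import requests

resp = requests.post(
    "http://localhost:30000/v1/completions",
    json={
        "model": "default",
        "prompt": "Return a user record as JSON:",
        "max_tokens": 64,
        "response_format": {
            "type": "json_schema",
            "json_schema": {
                "name": "user",
                "schema": {
                    "type": "object",
                    "properties": {
                        "name": {"type": "string"},
                        "age": {"type": "integer"},
                    },
                    "required": ["name", "age"],
                },
            },
        },
    },
)
print(resp.json()["choices"][0]["text"])  # output is constrained to the schema
```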
sglang/srt/function_call/deepseekv31_detector.py (new file)
@@ -0,0 +1,222 @@
+import json
+import logging
+import re
+from typing import List
+
+from sglang.srt.entrypoints.openai.protocol import Tool
+from sglang.srt.function_call.base_format_detector import BaseFormatDetector
+from sglang.srt.function_call.core_types import (
+    StreamingParseResult,
+    StructureInfo,
+    ToolCallItem,
+    _GetInfoFunc,
+)
+from sglang.srt.function_call.ebnf_composer import EBNFComposer
+from sglang.srt.function_call.utils import _is_complete_json
+
+logger = logging.getLogger(__name__)
+
+
+class DeepSeekV31Detector(BaseFormatDetector):
+    """
+    Detector for the DeepSeek V3.1 model function call format.
+
+    The DeepSeek V3.1 format uses special Unicode tokens to delimit function
+    calls, with JSON bodies for arguments.
+
+    Format Structure:
+    ```
+    <｜tool▁calls▁begin｜><｜tool▁call▁begin｜>{function_name}<｜tool▁sep｜>{json_arguments}<｜tool▁call▁end｜><｜tool▁calls▁end｜><｜end▁of▁sentence｜>
+    ```
+    Examples:
+    ```
+    <｜tool▁calls▁begin｜><｜tool▁call▁begin｜>get_current_weather<｜tool▁sep｜>{"location": "Tokyo"}<｜tool▁call▁end｜><｜tool▁call▁begin｜>get_current_weather<｜tool▁sep｜>{"location": "Paris"}<｜tool▁call▁end｜><｜tool▁calls▁end｜><｜end▁of▁sentence｜>
+    ```
+
+    Key Components:
+    - Tool Calls Section: Wrapped between `<｜tool▁calls▁begin｜>` and `<｜tool▁calls▁end｜>`
+    - Individual Tool Call: Wrapped between `<｜tool▁call▁begin｜>` and `<｜tool▁call▁end｜>`
+    - Function Declaration: `<｜tool▁call▁begin｜>{function_name}<｜tool▁sep｜>`
+    - Arguments: JSON body between `<｜tool▁sep｜>` and `<｜tool▁call▁end｜>`
+    - Supports multiple tool calls
+
+    Reference: https://www.modelscope.cn/models/deepseek-ai/DeepSeek-V3.1
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.bot_token = "<｜tool▁calls▁begin｜>"
+        self.eot_token = "<｜tool▁calls▁end｜>"
+        self.func_call_regex = r"<｜tool▁call▁begin｜>.*?<｜tool▁call▁end｜>"
+        self.func_detail_regex = (
+            r"<｜tool▁call▁begin｜>(.*)<｜tool▁sep｜>(.*)<｜tool▁call▁end｜>"
+        )
+        self._last_arguments = ""
+        self.current_tool_id = -1
+
+    def has_tool_call(self, text: str) -> bool:
+        """Check if the text contains a DeepSeek V3.1 format tool call."""
+        return self.bot_token in text
+
+    def detect_and_parse(self, text: str, tools: List[Tool]) -> StreamingParseResult:
+        """
+        One-time parsing: detects and parses tool calls in the provided text.
+
+        :param text: The complete text to parse.
+        :param tools: List of available tools.
+        :return: StreamingParseResult with the normal text and parsed calls.
+        """
+        idx = text.find(self.bot_token)
+        normal_text = text[:idx].strip() if idx != -1 else text
+        if self.bot_token not in text:
+            return StreamingParseResult(normal_text=normal_text, calls=[])
+        match_result_list = re.findall(self.func_call_regex, text, re.DOTALL)
+        calls = []
+        try:
+            for match_result in match_result_list:
+                # Get function name
+                func_detail = re.search(self.func_detail_regex, match_result, re.DOTALL)
+                func_name = func_detail.group(1)
+                func_args = func_detail.group(2)
+                func_args = json.loads(func_args)
+                # Construct match_result for parse_base_json
+                match_result = {"name": func_name, "parameters": func_args}
+                calls.extend(self.parse_base_json(match_result, tools))
+            return StreamingParseResult(normal_text=normal_text, calls=calls)
+        except Exception as e:
+            logger.error(f"Error in detect_and_parse: {e}")
+            # Return the normal text if parsing fails
+            return StreamingParseResult(normal_text=text)
+
+    def parse_streaming_increment(
+        self, new_text: str, tools: List[Tool]
+    ) -> StreamingParseResult:
+        """
+        Streaming incremental parsing of tool calls in the DeepSeek V3.1 format.
+        """
+        self._buffer += new_text
+        current_text = self._buffer
+
+        # Check if we have a tool call (either the start token or an individual tool call)
+        has_tool_call = (
+            self.bot_token in current_text or "<｜tool▁call▁begin｜>" in current_text
+        )
+
+        if not has_tool_call:
+            self._buffer = ""
+            for e_token in [self.eot_token, "<｜tool▁call▁end｜>"]:
+                if e_token in new_text:
+                    new_text = new_text.replace(e_token, "")
+            return StreamingParseResult(normal_text=new_text)
+
+        if not hasattr(self, "_tool_indices"):
+            self._tool_indices = self._get_tool_indices(tools)
+
+        calls: list[ToolCallItem] = []
+        try:
+            partial_match = re.search(
+                pattern=r"<｜tool▁call▁begin｜>(.*)<｜tool▁sep｜>(.*)<｜tool▁call▁end｜>",
+                string=current_text,
+                flags=re.DOTALL,
+            )
+            if partial_match:
+                func_name = partial_match.group(1).strip()
+                func_args_raw = partial_match.group(2).strip()
+
+                # Initialize state if this is the first tool call
+                if self.current_tool_id == -1:
+                    self.current_tool_id = 0
+                    self.prev_tool_call_arr = []
+                    self.streamed_args_for_tool = [""]
+
+                # Ensure we have enough entries in our tracking arrays
+                while len(self.prev_tool_call_arr) <= self.current_tool_id:
+                    self.prev_tool_call_arr.append({})
+                while len(self.streamed_args_for_tool) <= self.current_tool_id:
+                    self.streamed_args_for_tool.append("")
+
+                if not self.current_tool_name_sent:
+                    calls.append(
+                        ToolCallItem(
+                            tool_index=self.current_tool_id,
+                            name=func_name,
+                            parameters="",
+                        )
+                    )
+                    self.current_tool_name_sent = True
+                    # Store the tool call info for the serving layer's completions endpoint
+                    self.prev_tool_call_arr[self.current_tool_id] = {
+                        "name": func_name,
+                        "arguments": {},
+                    }
+                else:
+                    argument_diff = (
+                        func_args_raw[len(self._last_arguments) :]
+                        if func_args_raw.startswith(self._last_arguments)
+                        else func_args_raw
+                    )
+
+                    if argument_diff:
+                        calls.append(
+                            ToolCallItem(
+                                tool_index=self.current_tool_id,
+                                name=None,
+                                parameters=argument_diff,
+                            )
+                        )
+                        self._last_arguments += argument_diff
+                        self.streamed_args_for_tool[
+                            self.current_tool_id
+                        ] += argument_diff
+
+                    if _is_complete_json(func_args_raw):
+                        # Update the stored arguments
+                        try:
+                            parsed_args = json.loads(func_args_raw)
+                            self.prev_tool_call_arr[self.current_tool_id][
+                                "arguments"
+                            ] = parsed_args
+                        except json.JSONDecodeError:
+                            pass
+
+                        # Find the end of the current tool call and remove only that part from the buffer
+                        tool_call_end_pattern = (
+                            r"<｜tool▁call▁begin｜>.*?<｜tool▁call▁end｜>"
+                        )
+                        match = re.search(
+                            tool_call_end_pattern, current_text, re.DOTALL
+                        )
+                        if match:
+                            # Remove the completed tool call from the buffer, keep any remaining content
+                            self._buffer = current_text[match.end() :]
+                        else:
+                            self._buffer = ""
+
+                        result = StreamingParseResult(normal_text="", calls=calls)
+                        self.current_tool_id += 1
+                        self._last_arguments = ""
+                        self.current_tool_name_sent = False
+                        return result
+
+            return StreamingParseResult(normal_text="", calls=calls)
+
+        except Exception as e:
+            logger.error(f"Error in parse_streaming_increment: {e}")
+            return StreamingParseResult(normal_text=current_text)
+
+    def structure_info(self) -> _GetInfoFunc:
+        return lambda name: StructureInfo(
+            begin="<｜tool▁call▁begin｜>" + name + "<｜tool▁sep｜>",
+            end="<｜tool▁call▁end｜>",
+            trigger="<｜tool▁call▁begin｜>" + name + "<｜tool▁sep｜>",
+        )
+
+    def build_ebnf(self, tools: List[Tool]):
+        return EBNFComposer.build_ebnf(
+            tools,
+            sequence_start_token=self.bot_token,
+            sequence_end_token=self.eot_token,
+            tool_call_separator="",
+            call_rule_fmt='"<｜tool▁call▁begin｜>{name}<｜tool▁sep｜>{arguments_rule}<｜tool▁call▁end｜>"',
+            function_format="json",
+        )
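A quick offline exercise of the new detector (a sketch: `Tool`/`Function` construction follows sglang's OpenAI protocol models, and `parameters` on the returned call is a JSON string per `ToolCallItem`):

```python
from sglang.srt.entrypoints.openai.protocol import Function, Tool
from sglang.srt.function_call.deepseekv31_detector import DeepSeekV31Detector

tools = [
    Tool(
        type="function",
        function=Function(
            name="get_current_weather",
            parameters={
                "type": "object",
                "properties": {"location": {"type": "string"}},
            },
        ),
    )
]

text = (
    "Let me check.<｜tool▁calls▁begin｜><｜tool▁call▁begin｜>get_current_weather"
    '<｜tool▁sep｜>{"location": "Tokyo"}<｜tool▁call▁end｜><｜tool▁calls▁end｜>'
)

detector = DeepSeekV31Detector()
assert detector.has_tool_call(text)
result = detector.detect_and_parse(text, tools)
print(result.normal_text)          # "Let me check."
print(result.calls[0].name)        # "get_current_weather"
print(result.calls[0].parameters)  # '{"location": "Tokyo"}'
```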
sglang/srt/function_call/function_call_parser.py
@@ -10,6 +10,7 @@ from sglang.srt.entrypoints.openai.protocol import (
 from sglang.srt.function_call.base_format_detector import BaseFormatDetector
 from sglang.srt.function_call.core_types import ToolCallItem
 from sglang.srt.function_call.deepseekv3_detector import DeepSeekV3Detector
+from sglang.srt.function_call.deepseekv31_detector import DeepSeekV31Detector
 from sglang.srt.function_call.glm4_moe_detector import Glm4MoeDetector
 from sglang.srt.function_call.gpt_oss_detector import GptOssDetector
 from sglang.srt.function_call.kimik2_detector import KimiK2Detector
@@ -37,6 +38,7 @@ class FunctionCallParser:
         "qwen25": Qwen25Detector,
         "mistral": MistralDetector,
         "deepseekv3": DeepSeekV3Detector,
+        "deepseekv31": DeepSeekV31Detector,
         "pythonic": PythonicDetector,
         "kimi_k2": KimiK2Detector,
         "qwen3_coder": Qwen3CoderDetector,