sglang 0.5.1.post2__py3-none-any.whl → 0.5.2rc0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_one_batch.py +3 -0
- sglang/bench_one_batch_server.py +79 -53
- sglang/bench_serving.py +186 -14
- sglang/profiler.py +0 -1
- sglang/srt/configs/__init__.py +2 -0
- sglang/srt/configs/longcat_flash.py +104 -0
- sglang/srt/configs/model_config.py +12 -0
- sglang/srt/connector/__init__.py +1 -1
- sglang/srt/connector/base_connector.py +1 -2
- sglang/srt/connector/redis.py +2 -2
- sglang/srt/connector/serde/__init__.py +1 -1
- sglang/srt/connector/serde/safe_serde.py +4 -3
- sglang/srt/conversation.py +38 -5
- sglang/srt/disaggregation/ascend/conn.py +75 -0
- sglang/srt/disaggregation/launch_lb.py +0 -13
- sglang/srt/disaggregation/mini_lb.py +33 -8
- sglang/srt/disaggregation/prefill.py +1 -1
- sglang/srt/distributed/parallel_state.py +24 -14
- sglang/srt/entrypoints/engine.py +19 -12
- sglang/srt/entrypoints/http_server.py +174 -34
- sglang/srt/entrypoints/openai/protocol.py +87 -24
- sglang/srt/entrypoints/openai/serving_chat.py +50 -9
- sglang/srt/entrypoints/openai/serving_completions.py +15 -0
- sglang/srt/eplb/eplb_manager.py +26 -2
- sglang/srt/eplb/expert_distribution.py +29 -2
- sglang/srt/function_call/deepseekv31_detector.py +222 -0
- sglang/srt/function_call/function_call_parser.py +2 -0
- sglang/srt/function_call/gpt_oss_detector.py +144 -256
- sglang/srt/harmony_parser.py +588 -0
- sglang/srt/hf_transformers_utils.py +26 -7
- sglang/srt/layers/activation.py +12 -0
- sglang/srt/layers/attention/ascend_backend.py +374 -136
- sglang/srt/layers/attention/flashattention_backend.py +241 -7
- sglang/srt/layers/attention/flashinfer_backend.py +5 -2
- sglang/srt/layers/attention/flashinfer_mla_backend.py +5 -2
- sglang/srt/layers/attention/hybrid_attn_backend.py +53 -21
- sglang/srt/layers/attention/trtllm_mla_backend.py +25 -10
- sglang/srt/layers/communicator.py +1 -2
- sglang/srt/layers/layernorm.py +28 -3
- sglang/srt/layers/linear.py +3 -2
- sglang/srt/layers/logits_processor.py +1 -1
- sglang/srt/layers/moe/cutlass_moe.py +0 -8
- sglang/srt/layers/moe/ep_moe/kernels.py +74 -0
- sglang/srt/layers/moe/ep_moe/layer.py +13 -13
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=129,N=352,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=257,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- sglang/srt/layers/moe/topk.py +35 -12
- sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py +133 -235
- sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +5 -10
- sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +5 -23
- sglang/srt/layers/quantization/fp8.py +2 -1
- sglang/srt/layers/quantization/fp8_kernel.py +2 -2
- sglang/srt/layers/quantization/fp8_utils.py +2 -2
- sglang/srt/layers/quantization/modelopt_quant.py +7 -0
- sglang/srt/layers/quantization/mxfp4.py +25 -27
- sglang/srt/layers/quantization/mxfp4_tensor.py +3 -1
- sglang/srt/layers/quantization/utils.py +13 -0
- sglang/srt/layers/quantization/w8a8_int8.py +7 -3
- sglang/srt/layers/rotary_embedding.py +28 -1
- sglang/srt/layers/sampler.py +29 -5
- sglang/srt/layers/utils.py +0 -14
- sglang/srt/managers/cache_controller.py +237 -204
- sglang/srt/managers/detokenizer_manager.py +48 -2
- sglang/srt/managers/io_struct.py +57 -0
- sglang/srt/managers/mm_utils.py +5 -1
- sglang/srt/managers/multi_tokenizer_mixin.py +591 -0
- sglang/srt/managers/scheduler.py +94 -9
- sglang/srt/managers/scheduler_output_processor_mixin.py +20 -18
- sglang/srt/managers/scheduler_update_weights_mixin.py +8 -1
- sglang/srt/managers/tokenizer_manager.py +122 -42
- sglang/srt/mem_cache/chunk_cache.py +1 -1
- sglang/srt/mem_cache/hicache_storage.py +51 -23
- sglang/srt/mem_cache/hiradix_cache.py +87 -71
- sglang/srt/mem_cache/lora_radix_cache.py +1 -1
- sglang/srt/mem_cache/memory_pool.py +77 -14
- sglang/srt/mem_cache/memory_pool_host.py +4 -5
- sglang/srt/mem_cache/radix_cache.py +6 -4
- sglang/srt/mem_cache/radix_cache_cpp.py +1 -1
- sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +38 -20
- sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +87 -82
- sglang/srt/mem_cache/swa_radix_cache.py +1 -1
- sglang/srt/model_executor/model_runner.py +6 -5
- sglang/srt/model_loader/loader.py +15 -24
- sglang/srt/model_loader/utils.py +12 -0
- sglang/srt/models/deepseek_v2.py +38 -13
- sglang/srt/models/gpt_oss.py +2 -15
- sglang/srt/models/llama_eagle3.py +4 -0
- sglang/srt/models/longcat_flash.py +1015 -0
- sglang/srt/models/longcat_flash_nextn.py +691 -0
- sglang/srt/models/qwen2.py +26 -3
- sglang/srt/models/qwen2_5_vl.py +66 -41
- sglang/srt/models/qwen2_moe.py +22 -2
- sglang/srt/models/transformers.py +1 -1
- sglang/srt/multimodal/processors/base_processor.py +4 -2
- sglang/srt/reasoning_parser.py +56 -300
- sglang/srt/sampling/penaltylib/orchestrator.py +14 -2
- sglang/srt/server_args.py +122 -56
- sglang/srt/speculative/eagle_worker.py +28 -8
- sglang/srt/tokenizer/tiktoken_tokenizer.py +6 -1
- sglang/srt/utils.py +73 -5
- sglang/test/attention/test_trtllm_mla_backend.py +12 -3
- sglang/version.py +1 -1
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2rc0.dist-info}/METADATA +7 -6
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2rc0.dist-info}/RECORD +107 -99
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2rc0.dist-info}/WHEEL +0 -0
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2rc0.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2rc0.dist-info}/top_level.txt +0 -0
sglang/srt/function_call/deepseekv31_detector.py
@@ -0,0 +1,222 @@
+import json
+import logging
+import re
+from typing import List
+
+from sglang.srt.entrypoints.openai.protocol import Tool
+from sglang.srt.function_call.base_format_detector import BaseFormatDetector
+from sglang.srt.function_call.core_types import (
+    StreamingParseResult,
+    StructureInfo,
+    ToolCallItem,
+    _GetInfoFunc,
+)
+from sglang.srt.function_call.ebnf_composer import EBNFComposer
+from sglang.srt.function_call.utils import _is_complete_json
+
+logger = logging.getLogger(__name__)
+
+
+class DeepSeekV31Detector(BaseFormatDetector):
+    """
+    Detector for DeepSeek V3 model function call format.
+
+    The DeepSeek V3 format uses special Unicode tokens to delimit function calls
+    with JSON code blocks for arguments.
+
+    Format Structure:
+    ```
+    <|tool▁calls▁begin|><|tool▁call▁begin|>{function_name}<|tool▁sep|>{json_arguments}<|tool▁calls▁end|><|end▁of▁sentence|>
+    ```
+    Examples:
+    ```
+    <|tool▁calls▁begin|><|tool▁call▁begin|>get_current_weather<|tool▁sep|>{"location": "Tokyo"}<|tool▁call▁end|><|tool▁call▁begin|>get_current_weather<|tool▁sep|>{"location": "Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>
+    ```
+
+    Key Components:
+    - Tool Calls Section: Wrapped between `<|tool▁calls▁begin|>` and `<|tool▁calls▁end|>`
+    - Individual Tool Call: Wrapped between `<|tool▁call▁begin|>` and `<|tool▁call▁end|>`
+    - Function Declaration: `<|tool▁call▁begin|>{function_name}<|tool▁sep|>`
+    - Arguments: JSON code block between `<|tool▁sep|>` and `<|tool▁call▁end|>`
+    - Supports multiple tool calls
+
+    Reference: https://www.modelscope.cn/models/deepseek-ai/DeepSeek-V3.1
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.bot_token = "<|tool▁calls▁begin|>"
+        self.eot_token = "<|tool▁calls▁end|>"
+        self.func_call_regex = r"<|tool▁call▁begin|>.*?<|tool▁call▁end|>"
+        self.func_detail_regex = (
+            r"<|tool▁call▁begin|>(.*)<|tool▁sep|>(.*)<|tool▁call▁end|>"
+        )
+        self._last_arguments = ""
+        self.current_tool_id = -1
+
+    def has_tool_call(self, text: str) -> bool:
+        """Check if the text contains a deepseek format tool call."""
+        return self.bot_token in text
+
+    def detect_and_parse(self, text: str, tools: List[Tool]) -> StreamingParseResult:
+        """
+        One-time parsing: Detects and parses tool calls in the provided text.
+
+        :param text: The complete text to parse.
+        :param tools: List of available tools.
+        :return: ParseResult indicating success or failure, consumed text, leftover text, and parsed calls.
+        """
+        idx = text.find(self.bot_token)
+        normal_text = text[:idx].strip() if idx != -1 else text
+        if self.bot_token not in text:
+            return StreamingParseResult(normal_text=normal_text, calls=[])
+        match_result_list = re.findall(self.func_call_regex, text, re.DOTALL)
+        calls = []
+        try:
+            for match_result in match_result_list:
+                # Get function name
+                func_detail = re.search(self.func_detail_regex, match_result, re.DOTALL)
+                func_name = func_detail.group(1)
+                func_args = func_detail.group(2)
+                func_args = json.loads(func_args)
+                # construct match_result for parse_base_json
+                match_result = {"name": func_name, "parameters": func_args}
+                calls.extend(self.parse_base_json(match_result, tools))
+            return StreamingParseResult(normal_text=normal_text, calls=calls)
+        except Exception as e:
+            logger.error(f"Error in detect_and_parse: {e}")
+            # return the normal text if parsing fails
+            return StreamingParseResult(normal_text=text)
+
+    def parse_streaming_increment(
+        self, new_text: str, tools: List[Tool]
+    ) -> StreamingParseResult:
+        """
+        Streaming incremental parsing tool calls for DeepSeekV3 format.
+        """
+        self._buffer += new_text
+        current_text = self._buffer
+
+        # Check if we have a tool call (either the start token or individual tool call)
+        has_tool_call = (
+            self.bot_token in current_text or "<|tool▁call▁begin|>" in current_text
+        )
+
+        if not has_tool_call:
+            self._buffer = ""
+            for e_token in [self.eot_token, "<|tool▁call▁end|>"]:
+                if e_token in new_text:
+                    new_text = new_text.replace(e_token, "")
+            return StreamingParseResult(normal_text=new_text)
+
+        if not hasattr(self, "_tool_indices"):
+            self._tool_indices = self._get_tool_indices(tools)
+
+        calls: list[ToolCallItem] = []
+        try:
+            partial_match = re.search(
+                pattern=r"<|tool▁call▁begin|>(.*)<|tool▁sep|>(.*)<|tool▁call▁end|>",
+                string=current_text,
+                flags=re.DOTALL,
+            )
+            if partial_match:
+                func_name = partial_match.group(1).strip()
+                func_args_raw = partial_match.group(2).strip()
+
+                # Initialize state if this is the first tool call
+                if self.current_tool_id == -1:
+                    self.current_tool_id = 0
+                    self.prev_tool_call_arr = []
+                    self.streamed_args_for_tool = [""]
+
+                # Ensure we have enough entries in our tracking arrays
+                while len(self.prev_tool_call_arr) <= self.current_tool_id:
+                    self.prev_tool_call_arr.append({})
+                while len(self.streamed_args_for_tool) <= self.current_tool_id:
+                    self.streamed_args_for_tool.append("")
+
+                if not self.current_tool_name_sent:
+                    calls.append(
+                        ToolCallItem(
+                            tool_index=self.current_tool_id,
+                            name=func_name,
+                            parameters="",
+                        )
+                    )
+                    self.current_tool_name_sent = True
+                    # Store the tool call info for serving layer completions endpoint
+                    self.prev_tool_call_arr[self.current_tool_id] = {
+                        "name": func_name,
+                        "arguments": {},
+                    }
+                else:
+                    argument_diff = (
+                        func_args_raw[len(self._last_arguments) :]
+                        if func_args_raw.startswith(self._last_arguments)
+                        else func_args_raw
+                    )
+
+                    if argument_diff:
+                        calls.append(
+                            ToolCallItem(
+                                tool_index=self.current_tool_id,
+                                name=None,
+                                parameters=argument_diff,
+                            )
+                        )
+                        self._last_arguments += argument_diff
+                        self.streamed_args_for_tool[
+                            self.current_tool_id
+                        ] += argument_diff
+
+                    if _is_complete_json(func_args_raw):
+                        # Update the stored arguments
+                        try:
+                            parsed_args = json.loads(func_args_raw)
+                            self.prev_tool_call_arr[self.current_tool_id][
+                                "arguments"
+                            ] = parsed_args
+                        except json.JSONDecodeError:
+                            pass
+
+                        # Find the end of the current tool call and remove only that part from buffer
+                        tool_call_end_pattern = (
+                            r"<|tool▁call▁begin|>.*?<|tool▁call▁end|>"
+                        )
+                        match = re.search(
+                            tool_call_end_pattern, current_text, re.DOTALL
+                        )
+                        if match:
+                            # Remove the completed tool call from buffer, keep any remaining content
+                            self._buffer = current_text[match.end() :]
+                        else:
+                            self._buffer = ""
+
+                        result = StreamingParseResult(normal_text="", calls=calls)
+                        self.current_tool_id += 1
+                        self._last_arguments = ""
+                        self.current_tool_name_sent = False
+                        return result
+
+            return StreamingParseResult(normal_text="", calls=calls)
+
+        except Exception as e:
+            logger.error(f"Error in parse_streaming_increment: {e}")
+            return StreamingParseResult(normal_text=current_text)
+
+    def structure_info(self) -> _GetInfoFunc:
+        return lambda name: StructureInfo(
+            begin="<|tool▁call▁begin|>" + name + "<|tool▁sep|>",
+            end="<|tool▁call▁end|>",
+            trigger="<|tool▁call▁begin|>" + name + "<|tool▁sep|>",
+        )
+
+    def build_ebnf(self, tools: List[Tool]):
+        return EBNFComposer.build_ebnf(
+            tools,
+            sequence_start_token=self.bot_token,
+            sequence_end_token=self.eot_token,
+            tool_call_separator="",
+            call_rule_fmt='"<|tool▁call▁begin|>{name}<|tool▁sep|>{arguments_rule}<|tool▁call▁end|>"',
+            function_format="json",
+        )
sglang/srt/function_call/function_call_parser.py
@@ -10,6 +10,7 @@ from sglang.srt.entrypoints.openai.protocol import (
 from sglang.srt.function_call.base_format_detector import BaseFormatDetector
 from sglang.srt.function_call.core_types import ToolCallItem
 from sglang.srt.function_call.deepseekv3_detector import DeepSeekV3Detector
+from sglang.srt.function_call.deepseekv31_detector import DeepSeekV31Detector
 from sglang.srt.function_call.glm4_moe_detector import Glm4MoeDetector
 from sglang.srt.function_call.gpt_oss_detector import GptOssDetector
 from sglang.srt.function_call.kimik2_detector import KimiK2Detector
@@ -37,6 +38,7 @@ class FunctionCallParser:
         "qwen25": Qwen25Detector,
         "mistral": MistralDetector,
         "deepseekv3": DeepSeekV3Detector,
+        "deepseekv31": DeepSeekV31Detector,
         "pythonic": PythonicDetector,
         "kimi_k2": KimiK2Detector,
         "qwen3_coder": Qwen3CoderDetector,
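With the registration above, the new format can be selected by its registry key `"deepseekv31"` through `FunctionCallParser`. The sketch below assumes the `FunctionCallParser(tools, tool_call_parser)` constructor and `parse_non_stream` method from earlier sglang releases are unchanged; it is an illustration, not code from this diff.

```python
# Hedged sketch: selecting the new detector by its registered name.
from sglang.srt.function_call.function_call_parser import FunctionCallParser

# `tools` as in the previous example; `full_model_output` is the finished
# generation text to scan for tool calls.
parser = FunctionCallParser(tools=tools, tool_call_parser="deepseekv31")
normal_text, calls = parser.parse_non_stream(full_model_output)

# At the server level this presumably corresponds to launching with
# --tool-call-parser deepseekv31 (inferred from the registry key added above).
```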