sglang 0.5.1.post1__py3-none-any.whl → 0.5.1.post3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_one_batch_server.py +79 -53
- sglang/bench_serving.py +186 -14
- sglang/profiler.py +0 -1
- sglang/srt/conversation.py +38 -5
- sglang/srt/disaggregation/decode.py +4 -0
- sglang/srt/disaggregation/prefill.py +4 -0
- sglang/srt/entrypoints/engine.py +2 -2
- sglang/srt/entrypoints/openai/protocol.py +27 -24
- sglang/srt/entrypoints/openai/serving_chat.py +50 -9
- sglang/srt/entrypoints/openai/serving_completions.py +15 -0
- sglang/srt/entrypoints/tool.py +7 -7
- sglang/srt/function_call/deepseekv31_detector.py +222 -0
- sglang/srt/function_call/function_call_parser.py +2 -0
- sglang/srt/function_call/gpt_oss_detector.py +144 -256
- sglang/srt/harmony_parser.py +588 -0
- sglang/srt/hf_transformers_utils.py +16 -7
- sglang/srt/layers/attention/ascend_backend.py +218 -111
- sglang/srt/layers/attention/flashattention_backend.py +241 -7
- sglang/srt/layers/attention/flashinfer_backend.py +5 -2
- sglang/srt/layers/attention/flashinfer_mla_backend.py +76 -91
- sglang/srt/layers/attention/utils.py +15 -94
- sglang/srt/layers/communicator.py +1 -2
- sglang/srt/layers/moe/cutlass_moe.py +0 -15
- sglang/srt/layers/moe/ep_moe/layer.py +1 -7
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=257,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- sglang/srt/layers/moe/topk.py +1 -1
- sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py +133 -235
- sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +5 -7
- sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +5 -23
- sglang/srt/layers/quantization/fp8.py +2 -1
- sglang/srt/layers/quantization/fp8_kernel.py +2 -2
- sglang/srt/layers/quantization/fp8_utils.py +2 -2
- sglang/srt/layers/quantization/modelopt_quant.py +2 -2
- sglang/srt/layers/quantization/mxfp4.py +16 -23
- sglang/srt/layers/quantization/mxfp4_tensor.py +3 -1
- sglang/srt/layers/utils.py +0 -14
- sglang/srt/lora/lora_manager.py +29 -12
- sglang/srt/managers/cache_controller.py +223 -156
- sglang/srt/managers/detokenizer_manager.py +5 -0
- sglang/srt/managers/io_struct.py +30 -0
- sglang/srt/managers/scheduler.py +58 -7
- sglang/srt/managers/scheduler_metrics_mixin.py +15 -0
- sglang/srt/managers/tokenizer_manager.py +36 -3
- sglang/srt/mem_cache/hicache_storage.py +31 -20
- sglang/srt/mem_cache/hiradix_cache.py +12 -3
- sglang/srt/mem_cache/memory_pool.py +73 -14
- sglang/srt/mem_cache/memory_pool_host.py +3 -2
- sglang/srt/mem_cache/radix_cache.py +1 -0
- sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +5 -13
- sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +85 -81
- sglang/srt/metrics/collector.py +5 -5
- sglang/srt/model_executor/cuda_graph_runner.py +2 -2
- sglang/srt/model_executor/model_runner.py +1 -1
- sglang/srt/models/deepseek_v2.py +12 -3
- sglang/srt/models/gpt_oss.py +2 -1
- sglang/srt/models/qwen2_5_vl.py +1 -0
- sglang/srt/offloader.py +115 -0
- sglang/srt/reasoning_parser.py +56 -300
- sglang/srt/server_args.py +10 -5
- sglang/srt/tokenizer/tiktoken_tokenizer.py +6 -1
- sglang/srt/utils.py +59 -12
- sglang/test/test_cutlass_moe.py +33 -28
- sglang/version.py +1 -1
- {sglang-0.5.1.post1.dist-info → sglang-0.5.1.post3.dist-info}/METADATA +6 -5
- {sglang-0.5.1.post1.dist-info → sglang-0.5.1.post3.dist-info}/RECORD +69 -65
- {sglang-0.5.1.post1.dist-info → sglang-0.5.1.post3.dist-info}/WHEEL +0 -0
- {sglang-0.5.1.post1.dist-info → sglang-0.5.1.post3.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.5.1.post1.dist-info → sglang-0.5.1.post3.dist-info}/top_level.txt +0 -0
sglang/srt/entrypoints/openai/protocol.py CHANGED
@@ -35,6 +35,8 @@ from pydantic import (
 )
 from typing_extensions import Literal
 
+DEFAULT_MODEL_NAME = "default"
+
 
 class ModelCard(BaseModel):
     """Model cards."""
@@ -108,6 +110,23 @@ class JsonSchemaResponseFormat(BaseModel):
     strict: Optional[bool] = False
 
 
+class ResponseFormat(BaseModel):
+    type: Literal["text", "json_object", "json_schema"]
+    json_schema: Optional[JsonSchemaResponseFormat] = None
+
+
+class StructuresResponseFormat(BaseModel):
+    begin: str
+    schema_: Optional[Dict[str, object]] = Field(alias="schema", default=None)
+    end: str
+
+
+class StructuralTagResponseFormat(BaseModel):
+    type: Literal["structural_tag"]
+    structures: List[StructuresResponseFormat]
+    triggers: List[str]
+
+
 class FileRequest(BaseModel):
     # https://platform.openai.com/docs/api-reference/files/create
     file: bytes  # The File object (not file name) to be uploaded
@@ -166,7 +185,7 @@ class BatchResponse(BaseModel):
 class CompletionRequest(BaseModel):
     # Ordered by official OpenAI API documentation
     # https://platform.openai.com/docs/api-reference/completions/create
-    model: str
+    model: str = DEFAULT_MODEL_NAME
     prompt: Union[List[int], List[List[int]], str, List[str]]
     best_of: Optional[int] = None
     echo: bool = False
@@ -200,6 +219,7 @@ class CompletionRequest(BaseModel):
     skip_special_tokens: bool = True
     lora_path: Optional[Union[List[Optional[str]], Optional[str]]] = None
     session_params: Optional[Dict] = None
+    response_format: Optional[Union[ResponseFormat, StructuralTagResponseFormat]] = None
 
     # For PD disaggregation
     bootstrap_host: Optional[Union[List[str], str]] = None
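
CompletionRequest now accepts the same `response_format` constraint that chat completions already support; the server-side handling appears in serving_completions.py further down. A minimal sketch of a legacy-completions request using it (the endpoint URL and schema here are illustrative, not taken from this diff):

```
import requests

# Illustrative request against a local sglang server; "model" can be omitted
# now that it defaults to DEFAULT_MODEL_NAME.
resp = requests.post(
    "http://localhost:30000/v1/completions",
    json={
        "prompt": "Reply with the capital of France as JSON: ",
        "max_tokens": 64,
        "response_format": {
            "type": "json_schema",
            "json_schema": {
                "name": "capital",
                "schema": {
                    "type": "object",
                    "properties": {"capital": {"type": "string"}},
                    "required": ["capital"],
                },
            },
        },
    },
)
print(resp.json()["choices"][0]["text"])
```
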
@@ -327,7 +347,7 @@ class ToolCall(BaseModel):
 
 
 class ChatCompletionMessageGenericParam(BaseModel):
-    role: Literal["system", "assistant", "tool"]
+    role: Literal["system", "assistant", "tool", "function"]
     content: Union[str, List[ChatCompletionMessageContentTextPart], None] = Field(
         default=None
    )
@@ -341,9 +361,9 @@ class ChatCompletionMessageGenericParam(BaseModel):
     def _normalize_role(cls, v):
         if isinstance(v, str):
             v_lower = v.lower()
-            if v_lower not in {"system", "assistant", "tool"}:
+            if v_lower not in {"system", "assistant", "tool", "function"}:
                 raise ValueError(
-                    "'role' must be one of 'system', 'assistant', or 'tool' (case-insensitive)."
+                    "'role' must be one of 'system', 'assistant', 'tool', or 'function' (case-insensitive)."
                 )
             return v_lower
         raise ValueError("'role' must be a string")
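
Accepting `function` as a role restores compatibility with OpenAI's deprecated function-calling message format. An illustrative message list (values invented):

```
messages = [
    {"role": "user", "content": "What's the weather in Tokyo?"},
    # Legacy function-result message; this role failed validation before the
    # change above and is now normalized like the others:
    {"role": "function", "content": '{"temp_c": 21}'},
]
```
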
@@ -359,23 +379,6 @@ ChatCompletionMessageParam = Union[
 ]
 
 
-class ResponseFormat(BaseModel):
-    type: Literal["text", "json_object", "json_schema"]
-    json_schema: Optional[JsonSchemaResponseFormat] = None
-
-
-class StructuresResponseFormat(BaseModel):
-    begin: str
-    schema_: Optional[Dict[str, object]] = Field(alias="schema", default=None)
-    end: str
-
-
-class StructuralTagResponseFormat(BaseModel):
-    type: Literal["structural_tag"]
-    structures: List[StructuresResponseFormat]
-    triggers: List[str]
-
-
 class Function(BaseModel):
     """Function descriptions."""
 
@@ -409,7 +412,7 @@ class ChatCompletionRequest(BaseModel):
     # Ordered by official OpenAI API documentation
     # https://platform.openai.com/docs/api-reference/chat/create
     messages: List[ChatCompletionMessageParam]
-    model: str
+    model: str = DEFAULT_MODEL_NAME
     frequency_penalty: float = 0.0
     logit_bias: Optional[Dict[str, float]] = None
     logprobs: bool = False
@@ -571,7 +574,7 @@ class EmbeddingRequest(BaseModel):
     # Ordered by official OpenAI API documentation
     # https://platform.openai.com/docs/api-reference/embeddings/create
     input: EmbeddingInput
-    model: str
+    model: str = DEFAULT_MODEL_NAME
     encoding_format: str = "float"
     dimensions: Optional[int] = None
     user: Optional[str] = None
@@ -605,7 +608,7 @@ class ScoringRequest(BaseModel):
     )
     apply_softmax: bool = False
     item_first: bool = False
-    model: str
+    model: str = DEFAULT_MODEL_NAME
 
 
 class ScoringResponse(BaseModel):
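
With these four changes, `model` is optional on completion, chat, embedding, and scoring requests alike; a server hosting a single model resolves the default name. A hedged sketch (URL illustrative):

```
import requests

# No "model" field: the server falls back to DEFAULT_MODEL_NAME ("default").
resp = requests.post(
    "http://localhost:30000/v1/chat/completions",
    json={"messages": [{"role": "user", "content": "Hello"}]},
)
print(resp.json()["choices"][0]["message"]["content"])
```
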
sglang/srt/entrypoints/openai/serving_chat.py CHANGED
@@ -148,6 +148,16 @@ class OpenAIServingChat(OpenAIServingBase):
         self, request: ChatCompletionRequest, is_multimodal: bool
     ) -> MessageProcessingResult:
         """Process chat messages and apply chat template"""
+        is_gpt_oss = (
+            hasattr(self.tokenizer_manager.model_config, "hf_config")
+            and hasattr(self.tokenizer_manager.model_config.hf_config, "model_type")
+            and self.tokenizer_manager.model_config.hf_config.model_type == "gpt_oss"
+        )
+
+        # GptOss model needs to keep special tokens for harmony parsing
+        if is_gpt_oss:
+            request.skip_special_tokens = False
+
         tool_call_constraint = None
 
         # Apply chat template and its stop strings
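
The guard exists because gpt-oss responses are post-processed by the harmony parser, which splits output into channels using special tokens; stripping them during detokenization would destroy the channel boundaries. Illustrative only: the token layout below is an assumption based on the harmony format, not something this diff defines.

```
# What the parser needs to see (special tokens intact):
raw = "<|channel|>analysis<|message|>reasoning...<|end|><|channel|>final<|message|>Hi!<|end|>"
# With skip_special_tokens=True it would receive only "reasoning...Hi!" and
# could no longer separate the analysis channel from the final answer.
```
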
@@ -207,6 +217,25 @@ class OpenAIServingChat(OpenAIServingBase):
                     audio_data,
                     modalities,
                 )
+
+                # per the Transformers docs & maintainers, tool call arguments in
+                # assistant-role messages with tool_calls need to be dicts not JSON str -
+                # this is how tool-use chat templates will expect them moving forwards
+                # so, for messages that have tool_calls, parse the string (which we get
+                # from openAI format) to dict
+                if (
+                    processed_msg["role"] == "assistant"
+                    and "tool_calls" in processed_msg
+                    and isinstance(processed_msg["tool_calls"], list)
+                ):
+                    for item in processed_msg["tool_calls"]:
+                        if "arguments" in item["function"] and isinstance(
+                            item["function"]["arguments"], str
+                        ):
+                            item["function"]["arguments"] = json.loads(
+                                item["function"]["arguments"]
+                            )
+
                 openai_compatible_messages.append(processed_msg)
 
             # Handle assistant prefix for continue_final_message
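
Concretely, the normalization converts the OpenAI wire format (arguments as a JSON string) into the dict form that tool-use chat templates expect. A self-contained before/after sketch of the same transformation:

```
import json

msg = {
    "role": "assistant",
    "tool_calls": [
        {"function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
    ],
}
for item in msg["tool_calls"]:
    if isinstance(item["function"]["arguments"], str):
        item["function"]["arguments"] = json.loads(item["function"]["arguments"])

assert msg["tool_calls"][0]["function"]["arguments"] == {"city": "Paris"}
```
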
@@ -806,15 +835,23 @@ class OpenAIServingChat(OpenAIServingBase):
             finish_reason["matched"] = None
         try:
             text, call_info_list = parser.parse_non_stream(text)
-            tool_calls = [
-                ToolCall(
-                    id=f"call_{uuid.uuid4().hex[:24]}",
-                    index=getattr(call_info, "tool_index", None),
-                    function=FunctionResponse(
-                        name=call_info.name, arguments=call_info.parameters
+            tool_calls = []
+            for call_info in call_info_list:
+                # For Kimi-K2, align tool_call_id with the model format: functions.{name}:{index}
+                if tool_call_parser == "kimi_k2" and call_info.name is not None:
+                    tool_id = f"functions.{call_info.name}:{call_info.tool_index}"
+                else:
+                    tool_id = f"call_{uuid.uuid4().hex[:24]}"
+
+                tool_calls.append(
+                    ToolCall(
+                        id=tool_id,
+                        index=getattr(call_info, "tool_index", None),
+                        function=FunctionResponse(
+                            name=call_info.name, arguments=call_info.parameters
+                        ),
+                    )
                 )
-                for call_info in call_info_list
-            ]
             return tool_calls, text, finish_reason
         except Exception as e:
             logger.error(f"Tool call parsing error: {e}")
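
The two id branches produce, for a first call to a `get_weather` tool, something like the following (the random suffix is an example value):

```
# kimi_k2 parser: deterministic id in the model's own format
"functions.get_weather:0"
# every other parser: random OpenAI-style id
"call_9f2c4b1e7a5d4e0393ab12cd"
```
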
@@ -925,7 +962,11 @@ class OpenAIServingChat(OpenAIServingBase):
                     # Tool call ID should be generated only once per tool call
                     if call_item.name:
                         # First chunk: include ID and function name
-                        tool_call_id = f"call_{uuid.uuid4().hex[:24]}"
+                        if self.tokenizer_manager.server_args.tool_call_parser == "kimi_k2":
+                            # Align with Kimi-K2 format: functions.{name}:{index}
+                            tool_call_id = f"functions.{call_item.name}:{call_item.tool_index}"
+                        else:
+                            tool_call_id = f"call_{uuid.uuid4().hex[:24]}"
                         function_name = call_item.name
                     else:
                         # Subsequent chunks: null ID and name for argument deltas
sglang/srt/entrypoints/openai/serving_completions.py CHANGED
@@ -23,6 +23,7 @@ from sglang.srt.entrypoints.openai.utils import (
 from sglang.srt.managers.io_struct import GenerateReqInput
 from sglang.srt.managers.template_manager import TemplateManager
 from sglang.srt.managers.tokenizer_manager import TokenizerManager
+from sglang.utils import convert_json_schema_to_str
 
 logger = logging.getLogger(__name__)
 
@@ -125,6 +126,20 @@ class OpenAIServingCompletion(OpenAIServingBase):
             "logit_bias": request.logit_bias,
         }
 
+        # Handle response_format constraints
+        if request.response_format and request.response_format.type == "json_schema":
+            sampling_params["json_schema"] = convert_json_schema_to_str(
+                request.response_format.json_schema.schema_
+            )
+        elif request.response_format and request.response_format.type == "json_object":
+            sampling_params["json_schema"] = '{"type": "object"}'
+        elif (
+            request.response_format and request.response_format.type == "structural_tag"
+        ):
+            sampling_params["structural_tag"] = convert_json_schema_to_str(
+                request.response_format.model_dump(by_alias=True)
+            )
+
         return sampling_params
 
     async def _handle_streaming_request(
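
For the `structural_tag` branch, the entire response_format object is serialized with `by_alias=True` so the `schema_` field round-trips as `schema`. An illustrative request body exercising it (tag strings invented):

```
body = {
    "prompt": "Call the tool:",
    "response_format": {
        "type": "structural_tag",
        "triggers": ["<tool_call>"],
        "structures": [
            {
                "begin": "<tool_call>",
                "schema": {"type": "object"},  # populates StructuresResponseFormat.schema_
                "end": "</tool_call>",
            }
        ],
    },
}
```
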
sglang/srt/entrypoints/tool.py CHANGED
@@ -4,6 +4,8 @@ import os
 from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, Any
 
+from sglang.srt.utils import print_info_once, print_warning_once
+
 if TYPE_CHECKING:
     # Avoid circular import.
     from sglang.srt.entrypoints.context import ConversationContext
@@ -25,7 +27,7 @@ class HarmonyBrowserTool(Tool):
         exa_api_key = os.getenv("EXA_API_KEY")
         if not exa_api_key:
             self.enabled = False
-            logger.warning("EXA_API_KEY is not set, browsing is disabled")
+            print_warning_once("EXA_API_KEY is not set, browsing is disabled")
             return
 
         try:
@@ -33,12 +35,12 @@ class HarmonyBrowserTool(Tool):
             from gpt_oss.tools.simple_browser.backend import ExaBackend
         except ImportError:
             self.enabled = False
-            logger.warning("gpt_oss is not installed, browsing is disabled")
+            print_warning_once("gpt_oss is not installed, browsing is disabled")
             return
 
         browser_backend = ExaBackend(source="web", api_key=exa_api_key)
         self.browser_tool = SimpleBrowserTool(backend=browser_backend)
-        logger.info("Browser tool initialized")
+        print_info_once("Browser tool initialized")
 
     async def get_result(self, context: "ConversationContext") -> Any:
         from sglang.srt.entrypoints.context import HarmonyContext
|
@@ -64,13 +66,11 @@ class HarmonyPythonTool(Tool):
|
|
64
66
|
from gpt_oss.tools.python_docker.docker_tool import PythonTool
|
65
67
|
except ImportError:
|
66
68
|
self.enabled = False
|
67
|
-
|
68
|
-
"gpt_oss is not installed, code interpreter is disabled"
|
69
|
-
)
|
69
|
+
print_warning_once("gpt_oss is not installed, code interpreter is disabled")
|
70
70
|
return
|
71
71
|
|
72
72
|
self.python_tool = PythonTool()
|
73
|
-
|
73
|
+
print_info_once("Code interpreter tool initialized")
|
74
74
|
|
75
75
|
async def get_result(self, context: "ConversationContext") -> Any:
|
76
76
|
from sglang.srt.entrypoints.context import HarmonyContext
|
@@ -0,0 +1,222 @@
|
|
1
|
+
import json
|
2
|
+
import logging
|
3
|
+
import re
|
4
|
+
from typing import List
|
5
|
+
|
6
|
+
from sglang.srt.entrypoints.openai.protocol import Tool
|
7
|
+
from sglang.srt.function_call.base_format_detector import BaseFormatDetector
|
8
|
+
from sglang.srt.function_call.core_types import (
|
9
|
+
StreamingParseResult,
|
10
|
+
StructureInfo,
|
11
|
+
ToolCallItem,
|
12
|
+
_GetInfoFunc,
|
13
|
+
)
|
14
|
+
from sglang.srt.function_call.ebnf_composer import EBNFComposer
|
15
|
+
from sglang.srt.function_call.utils import _is_complete_json
|
16
|
+
|
17
|
+
logger = logging.getLogger(__name__)
|
18
|
+
|
19
|
+
|
20
|
+
class DeepSeekV31Detector(BaseFormatDetector):
|
21
|
+
"""
|
22
|
+
Detector for DeepSeek V3 model function call format.
|
23
|
+
|
24
|
+
The DeepSeek V3 format uses special Unicode tokens to delimit function calls
|
25
|
+
with JSON code blocks for arguments.
|
26
|
+
|
27
|
+
Format Structure:
|
28
|
+
```
|
29
|
+
<|tool▁calls▁begin|><|tool▁call▁begin|>{function_name}<|tool▁sep|>{json_arguments}<|tool▁calls▁end|><|end▁of▁sentence|>
|
30
|
+
```
|
31
|
+
Examples:
|
32
|
+
```
|
33
|
+
<|tool▁calls▁begin|><|tool▁call▁begin|>get_current_weather<|tool▁sep|>{"location": "Tokyo"}<|tool▁call▁end|><|tool▁call▁begin|>get_current_weather<|tool▁sep|>{"location": "Paris"}<|tool▁call▁end|><|tool▁calls▁end|><|end▁of▁sentence|>
|
34
|
+
```
|
35
|
+
|
36
|
+
Key Components:
|
37
|
+
- Tool Calls Section: Wrapped between `<|tool▁calls▁begin|>` and `<|tool▁calls▁end|>`
|
38
|
+
- Individual Tool Call: Wrapped between `<|tool▁call▁begin|>` and `<|tool▁call▁end|>`
|
39
|
+
- Function Declaration: `<|tool▁call▁begin|>{function_name}<|tool▁sep|>`
|
40
|
+
- Arguments: JSON code block between `<|tool▁sep|>` and `<|tool▁call▁end|>`
|
41
|
+
- Supports multiple tool calls
|
42
|
+
|
43
|
+
Reference: https://www.modelscope.cn/models/deepseek-ai/DeepSeek-V3.1
|
44
|
+
"""
|
45
|
+
|
46
|
+
def __init__(self):
|
47
|
+
super().__init__()
|
48
|
+
self.bot_token = "<|tool▁calls▁begin|>"
|
49
|
+
self.eot_token = "<|tool▁calls▁end|>"
|
50
|
+
self.func_call_regex = r"<|tool▁call▁begin|>.*?<|tool▁call▁end|>"
|
51
|
+
self.func_detail_regex = (
|
52
|
+
r"<|tool▁call▁begin|>(.*)<|tool▁sep|>(.*)<|tool▁call▁end|>"
|
53
|
+
)
|
54
|
+
self._last_arguments = ""
|
55
|
+
self.current_tool_id = -1
|
56
|
+
|
57
|
+
def has_tool_call(self, text: str) -> bool:
|
58
|
+
"""Check if the text contains a deepseek format tool call."""
|
59
|
+
return self.bot_token in text
|
60
|
+
|
61
|
+
def detect_and_parse(self, text: str, tools: List[Tool]) -> StreamingParseResult:
|
62
|
+
"""
|
63
|
+
One-time parsing: Detects and parses tool calls in the provided text.
|
64
|
+
|
65
|
+
:param text: The complete text to parse.
|
66
|
+
:param tools: List of available tools.
|
67
|
+
:return: ParseResult indicating success or failure, consumed text, leftover text, and parsed calls.
|
68
|
+
"""
|
69
|
+
idx = text.find(self.bot_token)
|
70
|
+
normal_text = text[:idx].strip() if idx != -1 else text
|
71
|
+
if self.bot_token not in text:
|
72
|
+
return StreamingParseResult(normal_text=normal_text, calls=[])
|
73
|
+
match_result_list = re.findall(self.func_call_regex, text, re.DOTALL)
|
74
|
+
calls = []
|
75
|
+
try:
|
76
|
+
for match_result in match_result_list:
|
77
|
+
# Get function name
|
78
|
+
func_detail = re.search(self.func_detail_regex, match_result, re.DOTALL)
|
79
|
+
func_name = func_detail.group(1)
|
80
|
+
func_args = func_detail.group(2)
|
81
|
+
func_args = json.loads(func_args)
|
82
|
+
# construct match_result for parse_base_json
|
83
|
+
match_result = {"name": func_name, "parameters": func_args}
|
84
|
+
calls.extend(self.parse_base_json(match_result, tools))
|
85
|
+
return StreamingParseResult(normal_text=normal_text, calls=calls)
|
86
|
+
except Exception as e:
|
87
|
+
logger.error(f"Error in detect_and_parse: {e}")
|
88
|
+
# return the normal text if parsing fails
|
89
|
+
return StreamingParseResult(normal_text=text)
|
90
|
+
|
91
|
+
def parse_streaming_increment(
|
92
|
+
self, new_text: str, tools: List[Tool]
|
93
|
+
) -> StreamingParseResult:
|
94
|
+
"""
|
95
|
+
Streaming incremental parsing tool calls for DeepSeekV3 format.
|
96
|
+
"""
|
97
|
+
self._buffer += new_text
|
98
|
+
current_text = self._buffer
|
99
|
+
|
100
|
+
# Check if we have a tool call (either the start token or individual tool call)
|
101
|
+
has_tool_call = (
|
102
|
+
self.bot_token in current_text or "<|tool▁call▁begin|>" in current_text
|
103
|
+
)
|
104
|
+
|
105
|
+
if not has_tool_call:
|
106
|
+
self._buffer = ""
|
107
|
+
for e_token in [self.eot_token, "<|tool▁call▁end|>"]:
|
108
|
+
if e_token in new_text:
|
109
|
+
new_text = new_text.replace(e_token, "")
|
110
|
+
return StreamingParseResult(normal_text=new_text)
|
111
|
+
|
112
|
+
if not hasattr(self, "_tool_indices"):
|
113
|
+
self._tool_indices = self._get_tool_indices(tools)
|
114
|
+
|
115
|
+
calls: list[ToolCallItem] = []
|
116
|
+
try:
|
117
|
+
partial_match = re.search(
|
118
|
+
pattern=r"<|tool▁call▁begin|>(.*)<|tool▁sep|>(.*)<|tool▁call▁end|>",
|
119
|
+
string=current_text,
|
120
|
+
flags=re.DOTALL,
|
121
|
+
)
|
122
|
+
if partial_match:
|
123
|
+
func_name = partial_match.group(1).strip()
|
124
|
+
func_args_raw = partial_match.group(2).strip()
|
125
|
+
|
126
|
+
# Initialize state if this is the first tool call
|
127
|
+
if self.current_tool_id == -1:
|
128
|
+
self.current_tool_id = 0
|
129
|
+
self.prev_tool_call_arr = []
|
130
|
+
self.streamed_args_for_tool = [""]
|
131
|
+
|
132
|
+
# Ensure we have enough entries in our tracking arrays
|
133
|
+
while len(self.prev_tool_call_arr) <= self.current_tool_id:
|
134
|
+
self.prev_tool_call_arr.append({})
|
135
|
+
while len(self.streamed_args_for_tool) <= self.current_tool_id:
|
136
|
+
self.streamed_args_for_tool.append("")
|
137
|
+
|
138
|
+
if not self.current_tool_name_sent:
|
139
|
+
calls.append(
|
140
|
+
ToolCallItem(
|
141
|
+
tool_index=self.current_tool_id,
|
142
|
+
name=func_name,
|
143
|
+
parameters="",
|
144
|
+
)
|
145
|
+
)
|
146
|
+
self.current_tool_name_sent = True
|
147
|
+
# Store the tool call info for serving layer completions endpoint
|
148
|
+
self.prev_tool_call_arr[self.current_tool_id] = {
|
149
|
+
"name": func_name,
|
150
|
+
"arguments": {},
|
151
|
+
}
|
152
|
+
else:
|
153
|
+
argument_diff = (
|
154
|
+
func_args_raw[len(self._last_arguments) :]
|
155
|
+
if func_args_raw.startswith(self._last_arguments)
|
156
|
+
else func_args_raw
|
157
|
+
)
|
158
|
+
|
159
|
+
if argument_diff:
|
160
|
+
calls.append(
|
161
|
+
ToolCallItem(
|
162
|
+
tool_index=self.current_tool_id,
|
163
|
+
name=None,
|
164
|
+
parameters=argument_diff,
|
165
|
+
)
|
166
|
+
)
|
167
|
+
self._last_arguments += argument_diff
|
168
|
+
self.streamed_args_for_tool[
|
169
|
+
self.current_tool_id
|
170
|
+
] += argument_diff
|
171
|
+
|
172
|
+
if _is_complete_json(func_args_raw):
|
173
|
+
# Update the stored arguments
|
174
|
+
try:
|
175
|
+
parsed_args = json.loads(func_args_raw)
|
176
|
+
self.prev_tool_call_arr[self.current_tool_id][
|
177
|
+
"arguments"
|
178
|
+
] = parsed_args
|
179
|
+
except json.JSONDecodeError:
|
180
|
+
pass
|
181
|
+
|
182
|
+
# Find the end of the current tool call and remove only that part from buffer
|
183
|
+
tool_call_end_pattern = (
|
184
|
+
r"<|tool▁call▁begin|>.*?<|tool▁call▁end|>"
|
185
|
+
)
|
186
|
+
match = re.search(
|
187
|
+
tool_call_end_pattern, current_text, re.DOTALL
|
188
|
+
)
|
189
|
+
if match:
|
190
|
+
# Remove the completed tool call from buffer, keep any remaining content
|
191
|
+
self._buffer = current_text[match.end() :]
|
192
|
+
else:
|
193
|
+
self._buffer = ""
|
194
|
+
|
195
|
+
result = StreamingParseResult(normal_text="", calls=calls)
|
196
|
+
self.current_tool_id += 1
|
197
|
+
self._last_arguments = ""
|
198
|
+
self.current_tool_name_sent = False
|
199
|
+
return result
|
200
|
+
|
201
|
+
return StreamingParseResult(normal_text="", calls=calls)
|
202
|
+
|
203
|
+
except Exception as e:
|
204
|
+
logger.error(f"Error in parse_streaming_increment: {e}")
|
205
|
+
return StreamingParseResult(normal_text=current_text)
|
206
|
+
|
207
|
+
def structure_info(self) -> _GetInfoFunc:
|
208
|
+
return lambda name: StructureInfo(
|
209
|
+
begin="<|tool▁call▁begin|>" + name + "<|tool▁sep|>",
|
210
|
+
end="<|tool▁call▁end|>",
|
211
|
+
trigger="<|tool▁call▁begin|>" + name + "<|tool▁sep|>",
|
212
|
+
)
|
213
|
+
|
214
|
+
def build_ebnf(self, tools: List[Tool]):
|
215
|
+
return EBNFComposer.build_ebnf(
|
216
|
+
tools,
|
217
|
+
sequence_start_token=self.bot_token,
|
218
|
+
sequence_end_token=self.eot_token,
|
219
|
+
tool_call_separator="",
|
220
|
+
call_rule_fmt='"<|tool▁call▁begin|>{name}<|tool▁sep|>{arguments_rule}<|tool▁call▁end|>"',
|
221
|
+
function_format="json",
|
222
|
+
)
|
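
A hedged usage sketch for the new detector on a complete (non-streaming) response; the Tool construction follows sglang's OpenAI protocol models, but treat the exact fields as an assumption:

```
from sglang.srt.entrypoints.openai.protocol import Function, Tool
from sglang.srt.function_call.deepseekv31_detector import DeepSeekV31Detector

tools = [
    Tool(function=Function(name="get_current_weather", parameters={"type": "object"}))
]
detector = DeepSeekV31Detector()
result = detector.detect_and_parse(
    "<|tool▁calls▁begin|><|tool▁call▁begin|>get_current_weather"
    '<|tool▁sep|>{"location": "Tokyo"}<|tool▁call▁end|><|tool▁calls▁end|>',
    tools,
)
print(result.calls)  # expected: one ToolCallItem for get_current_weather
```
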
sglang/srt/function_call/function_call_parser.py CHANGED
@@ -10,6 +10,7 @@ from sglang.srt.entrypoints.openai.protocol import (
 from sglang.srt.function_call.base_format_detector import BaseFormatDetector
 from sglang.srt.function_call.core_types import ToolCallItem
 from sglang.srt.function_call.deepseekv3_detector import DeepSeekV3Detector
+from sglang.srt.function_call.deepseekv31_detector import DeepSeekV31Detector
 from sglang.srt.function_call.glm4_moe_detector import Glm4MoeDetector
 from sglang.srt.function_call.gpt_oss_detector import GptOssDetector
 from sglang.srt.function_call.kimik2_detector import KimiK2Detector
@@ -37,6 +38,7 @@ class FunctionCallParser:
         "qwen25": Qwen25Detector,
         "mistral": MistralDetector,
         "deepseekv3": DeepSeekV3Detector,
+        "deepseekv31": DeepSeekV31Detector,
         "pythonic": PythonicDetector,
         "kimi_k2": KimiK2Detector,
         "qwen3_coder": Qwen3CoderDetector,
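
Registration makes the detector reachable through the existing selection mechanism; a sketch, assuming the standard `--tool-call-parser` server flag and the `FunctionCallParser` constructor in this module:

```
# At server launch (shell):
#   python -m sglang.launch_server --model-path deepseek-ai/DeepSeek-V3.1 \
#       --tool-call-parser deepseekv31
#
# Or directly in Python:
from sglang.srt.function_call.function_call_parser import FunctionCallParser

parser = FunctionCallParser(tools=[], tool_call_parser="deepseekv31")
```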