sglang 0.5.1.post2__py3-none-any.whl → 0.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_one_batch.py +3 -0
- sglang/bench_one_batch_server.py +89 -54
- sglang/bench_serving.py +437 -40
- sglang/lang/interpreter.py +1 -1
- sglang/profiler.py +0 -1
- sglang/srt/configs/__init__.py +4 -0
- sglang/srt/configs/internvl.py +6 -0
- sglang/srt/configs/longcat_flash.py +104 -0
- sglang/srt/configs/model_config.py +37 -7
- sglang/srt/configs/qwen3_next.py +326 -0
- sglang/srt/connector/__init__.py +1 -1
- sglang/srt/connector/base_connector.py +1 -2
- sglang/srt/connector/redis.py +2 -2
- sglang/srt/connector/serde/__init__.py +1 -1
- sglang/srt/connector/serde/safe_serde.py +4 -3
- sglang/srt/custom_op.py +11 -1
- sglang/srt/debug_utils/dump_comparator.py +81 -44
- sglang/srt/debug_utils/dump_loader.py +97 -0
- sglang/srt/debug_utils/dumper.py +11 -3
- sglang/srt/debug_utils/text_comparator.py +73 -11
- sglang/srt/disaggregation/ascend/conn.py +75 -0
- sglang/srt/disaggregation/base/conn.py +1 -1
- sglang/srt/disaggregation/common/conn.py +15 -12
- sglang/srt/disaggregation/decode.py +6 -4
- sglang/srt/disaggregation/fake/conn.py +1 -1
- sglang/srt/disaggregation/mini_lb.py +6 -420
- sglang/srt/disaggregation/mooncake/conn.py +18 -10
- sglang/srt/disaggregation/nixl/conn.py +180 -16
- sglang/srt/disaggregation/prefill.py +6 -4
- sglang/srt/disaggregation/utils.py +5 -50
- sglang/srt/distributed/parallel_state.py +94 -58
- sglang/srt/entrypoints/engine.py +34 -14
- sglang/srt/entrypoints/http_server.py +172 -47
- sglang/srt/entrypoints/openai/protocol.py +90 -27
- sglang/srt/entrypoints/openai/serving_base.py +6 -2
- sglang/srt/entrypoints/openai/serving_chat.py +82 -26
- sglang/srt/entrypoints/openai/serving_completions.py +25 -4
- sglang/srt/entrypoints/openai/serving_embedding.py +8 -4
- sglang/srt/entrypoints/openai/serving_responses.py +7 -4
- sglang/srt/eplb/eplb_manager.py +28 -4
- sglang/srt/eplb/expert_distribution.py +55 -15
- sglang/srt/eplb/expert_location.py +8 -3
- sglang/srt/eplb/expert_location_updater.py +1 -1
- sglang/srt/function_call/deepseekv31_detector.py +222 -0
- sglang/srt/function_call/ebnf_composer.py +11 -9
- sglang/srt/function_call/function_call_parser.py +2 -0
- sglang/srt/function_call/glm4_moe_detector.py +1 -1
- sglang/srt/function_call/gpt_oss_detector.py +144 -256
- sglang/srt/function_call/qwen3_coder_detector.py +1 -1
- sglang/srt/hf_transformers_utils.py +28 -7
- sglang/srt/layers/activation.py +44 -9
- sglang/srt/layers/attention/aiter_backend.py +93 -68
- sglang/srt/layers/attention/ascend_backend.py +381 -136
- sglang/srt/layers/attention/fla/chunk.py +242 -0
- sglang/srt/layers/attention/fla/chunk_delta_h.py +314 -0
- sglang/srt/layers/attention/fla/chunk_o.py +178 -0
- sglang/srt/layers/attention/fla/chunk_scaled_dot_kkt.py +151 -0
- sglang/srt/layers/attention/fla/cumsum.py +300 -0
- sglang/srt/layers/attention/fla/fused_recurrent.py +640 -0
- sglang/srt/layers/attention/fla/fused_sigmoid_gating_recurrent.py +232 -0
- sglang/srt/layers/attention/fla/index.py +37 -0
- sglang/srt/layers/attention/fla/l2norm.py +150 -0
- sglang/srt/layers/attention/fla/layernorm_gated.py +326 -0
- sglang/srt/layers/attention/fla/op.py +66 -0
- sglang/srt/layers/attention/fla/solve_tril.py +465 -0
- sglang/srt/layers/attention/fla/utils.py +331 -0
- sglang/srt/layers/attention/fla/wy_fast.py +158 -0
- sglang/srt/layers/attention/flashattention_backend.py +241 -7
- sglang/srt/layers/attention/flashinfer_backend.py +11 -6
- sglang/srt/layers/attention/flashinfer_mla_backend.py +21 -14
- sglang/srt/layers/attention/hybrid_attn_backend.py +47 -8
- sglang/srt/layers/attention/hybrid_linear_attn_backend.py +584 -0
- sglang/srt/layers/attention/intel_amx_backend.py +3 -0
- sglang/srt/layers/attention/mamba/causal_conv1d.py +128 -0
- sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +1052 -0
- sglang/srt/layers/attention/mamba/mamba.py +64 -0
- sglang/srt/layers/attention/torch_native_backend.py +12 -6
- sglang/srt/layers/attention/trtllm_mla_backend.py +126 -36
- sglang/srt/layers/attention/wave_ops/decode_attention.py +2 -4
- sglang/srt/layers/attention/wave_ops/extend_attention.py +1 -3
- sglang/srt/layers/communicator.py +45 -8
- sglang/srt/layers/layernorm.py +54 -12
- sglang/srt/layers/logits_processor.py +10 -3
- sglang/srt/layers/moe/__init__.py +2 -1
- sglang/srt/layers/moe/cutlass_moe.py +0 -8
- sglang/srt/layers/moe/cutlass_w4a8_moe.py +4 -12
- sglang/srt/layers/moe/ep_moe/kernels.py +74 -0
- sglang/srt/layers/moe/ep_moe/layer.py +111 -56
- sglang/srt/layers/moe/fused_moe_native.py +5 -3
- sglang/srt/layers/moe/fused_moe_triton/__init__.py +5 -3
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=129,N=352,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/{E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json → E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json} +29 -29
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=257,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=64,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +9 -1049
- sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +212 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_kernels.py +799 -0
- sglang/srt/layers/moe/fused_moe_triton/layer.py +56 -45
- sglang/srt/layers/moe/fused_moe_triton/moe_align_block_size.py +87 -0
- sglang/srt/layers/moe/moe_runner/__init__.py +2 -1
- sglang/srt/layers/moe/moe_runner/base.py +274 -1
- sglang/srt/layers/moe/moe_runner/runner.py +80 -0
- sglang/srt/layers/moe/moe_runner/triton.py +448 -0
- sglang/srt/layers/moe/token_dispatcher/__init__.py +16 -4
- sglang/srt/layers/moe/token_dispatcher/{base_dispatcher.py → base.py} +67 -17
- sglang/srt/layers/moe/token_dispatcher/deepep.py +41 -38
- sglang/srt/layers/moe/token_dispatcher/standard.py +44 -2
- sglang/srt/layers/moe/topk.py +43 -12
- sglang/srt/layers/moe/utils.py +6 -5
- sglang/srt/layers/quantization/awq.py +19 -7
- sglang/srt/layers/quantization/base_config.py +11 -6
- sglang/srt/layers/quantization/blockwise_int8.py +38 -27
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +50 -30
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +13 -1
- sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py +141 -235
- sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +5 -10
- sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +31 -22
- sglang/srt/layers/quantization/fp8.py +78 -48
- sglang/srt/layers/quantization/fp8_kernel.py +2 -2
- sglang/srt/layers/quantization/fp8_utils.py +45 -31
- sglang/srt/layers/quantization/gptq.py +25 -17
- sglang/srt/layers/quantization/modelopt_quant.py +107 -40
- sglang/srt/layers/quantization/moe_wna16.py +21 -18
- sglang/srt/layers/quantization/mxfp4.py +93 -68
- sglang/srt/layers/quantization/mxfp4_tensor.py +3 -1
- sglang/srt/layers/quantization/quark/quark_moe.py +32 -27
- sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +49 -30
- sglang/srt/layers/quantization/quark/utils.py +97 -0
- sglang/srt/layers/quantization/rocm_mxfp4_utils.py +13 -0
- sglang/srt/layers/quantization/unquant.py +135 -47
- sglang/srt/layers/quantization/utils.py +13 -0
- sglang/srt/layers/quantization/w4afp8.py +60 -42
- sglang/srt/layers/quantization/w8a8_fp8.py +35 -20
- sglang/srt/layers/quantization/w8a8_int8.py +83 -41
- sglang/srt/layers/rocm_linear_utils.py +44 -0
- sglang/srt/layers/rotary_embedding.py +28 -19
- sglang/srt/layers/sampler.py +29 -5
- sglang/srt/layers/utils.py +0 -14
- sglang/srt/lora/backend/base_backend.py +50 -8
- sglang/srt/lora/backend/triton_backend.py +90 -2
- sglang/srt/lora/layers.py +32 -0
- sglang/srt/lora/lora.py +4 -1
- sglang/srt/lora/lora_manager.py +35 -112
- sglang/srt/lora/mem_pool.py +24 -10
- sglang/srt/lora/utils.py +18 -9
- sglang/srt/managers/cache_controller.py +396 -365
- sglang/srt/managers/data_parallel_controller.py +30 -15
- sglang/srt/managers/detokenizer_manager.py +18 -2
- sglang/srt/managers/disagg_service.py +46 -0
- sglang/srt/managers/io_struct.py +190 -11
- sglang/srt/managers/mm_utils.py +6 -1
- sglang/srt/managers/multi_tokenizer_mixin.py +579 -0
- sglang/srt/managers/schedule_batch.py +27 -44
- sglang/srt/managers/schedule_policy.py +4 -3
- sglang/srt/managers/scheduler.py +148 -122
- sglang/srt/managers/scheduler_metrics_mixin.py +114 -8
- sglang/srt/managers/scheduler_output_processor_mixin.py +29 -19
- sglang/srt/managers/scheduler_profiler_mixin.py +1 -1
- sglang/srt/managers/scheduler_update_weights_mixin.py +8 -1
- sglang/srt/managers/template_manager.py +3 -3
- sglang/srt/managers/tokenizer_communicator_mixin.py +491 -0
- sglang/srt/managers/tokenizer_manager.py +77 -480
- sglang/srt/managers/tp_worker.py +16 -4
- sglang/srt/managers/tp_worker_overlap_thread.py +8 -10
- sglang/srt/mem_cache/allocator.py +1 -1
- sglang/srt/mem_cache/chunk_cache.py +1 -1
- sglang/srt/mem_cache/hicache_storage.py +53 -40
- sglang/srt/mem_cache/hiradix_cache.py +196 -104
- sglang/srt/mem_cache/lora_radix_cache.py +1 -1
- sglang/srt/mem_cache/memory_pool.py +395 -53
- sglang/srt/mem_cache/memory_pool_host.py +27 -19
- sglang/srt/mem_cache/radix_cache.py +6 -6
- sglang/srt/mem_cache/radix_cache_cpp.py +1 -1
- sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +164 -0
- sglang/srt/mem_cache/storage/hf3fs/{client_hf3fs.py → hf3fs_usrbio_client.py} +5 -1
- sglang/srt/mem_cache/storage/hf3fs/mini_3fs_metadata_server.py +61 -34
- sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +152 -23
- sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +280 -0
- sglang/srt/mem_cache/storage/lmcache/unit_test.py +121 -0
- sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +154 -95
- sglang/srt/mem_cache/storage/mooncake_store/test_mooncake_store.py +161 -0
- sglang/srt/mem_cache/swa_radix_cache.py +1 -3
- sglang/srt/metrics/collector.py +484 -63
- sglang/srt/metrics/startup_func_log_and_timer.py +150 -0
- sglang/srt/metrics/utils.py +48 -0
- sglang/srt/model_executor/cpu_graph_runner.py +640 -0
- sglang/srt/model_executor/cuda_graph_runner.py +13 -5
- sglang/srt/model_executor/forward_batch_info.py +72 -18
- sglang/srt/model_executor/model_runner.py +190 -32
- sglang/srt/model_loader/__init__.py +9 -3
- sglang/srt/model_loader/loader.py +33 -28
- sglang/srt/model_loader/utils.py +12 -0
- sglang/srt/model_loader/weight_utils.py +2 -1
- sglang/srt/models/deepseek_v2.py +323 -53
- sglang/srt/models/gemma3n_mm.py +1 -1
- sglang/srt/models/glm4_moe.py +10 -1
- sglang/srt/models/glm4v.py +4 -2
- sglang/srt/models/gpt_oss.py +7 -19
- sglang/srt/models/internvl.py +28 -0
- sglang/srt/models/llama4.py +9 -0
- sglang/srt/models/llama_eagle3.py +17 -0
- sglang/srt/models/longcat_flash.py +1026 -0
- sglang/srt/models/longcat_flash_nextn.py +699 -0
- sglang/srt/models/minicpmv.py +165 -3
- sglang/srt/models/mllama4.py +25 -0
- sglang/srt/models/opt.py +637 -0
- sglang/srt/models/qwen2.py +33 -3
- sglang/srt/models/qwen2_5_vl.py +91 -42
- sglang/srt/models/qwen2_moe.py +79 -14
- sglang/srt/models/qwen3.py +8 -2
- sglang/srt/models/qwen3_moe.py +39 -8
- sglang/srt/models/qwen3_next.py +1039 -0
- sglang/srt/models/qwen3_next_mtp.py +109 -0
- sglang/srt/models/torch_native_llama.py +1 -1
- sglang/srt/models/transformers.py +1 -1
- sglang/srt/multimodal/processors/base_processor.py +4 -2
- sglang/srt/multimodal/processors/glm4v.py +9 -9
- sglang/srt/multimodal/processors/internvl.py +141 -129
- sglang/srt/{conversation.py → parser/conversation.py} +38 -5
- sglang/srt/parser/harmony_parser.py +588 -0
- sglang/srt/parser/reasoning_parser.py +309 -0
- sglang/srt/sampling/penaltylib/orchestrator.py +14 -2
- sglang/srt/sampling/sampling_batch_info.py +18 -15
- sglang/srt/server_args.py +307 -80
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +5 -0
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +10 -1
- sglang/srt/speculative/eagle_worker.py +216 -120
- sglang/srt/speculative/spec_info.py +5 -0
- sglang/srt/speculative/standalone_worker.py +109 -0
- sglang/srt/tokenizer/tiktoken_tokenizer.py +6 -1
- sglang/srt/utils.py +96 -7
- sglang/srt/weight_sync/utils.py +1 -1
- sglang/test/attention/test_trtllm_mla_backend.py +181 -8
- sglang/test/few_shot_gsm8k.py +1 -0
- sglang/test/runners.py +4 -0
- sglang/test/test_cutlass_moe.py +24 -6
- sglang/test/test_cutlass_w4a8_moe.py +24 -9
- sglang/test/test_disaggregation_utils.py +66 -0
- sglang/test/test_utils.py +25 -1
- sglang/utils.py +5 -0
- sglang/version.py +1 -1
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2.dist-info}/METADATA +13 -10
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2.dist-info}/RECORD +253 -201
- sglang/srt/disaggregation/launch_lb.py +0 -131
- sglang/srt/mem_cache/storage/mooncake_store/unit_test.py +0 -40
- sglang/srt/reasoning_parser.py +0 -553
- /sglang/srt/{model_parallel.py → layers/model_parallel.py} +0 -0
- /sglang/srt/{code_completion_parser.py → parser/code_completion_parser.py} +0 -0
- /sglang/srt/{jinja_template_utils.py → parser/jinja_template_utils.py} +0 -0
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2.dist-info}/WHEEL +0 -0
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.5.1.post2.dist-info → sglang-0.5.2.dist-info}/top_level.txt +0 -0
sglang/srt/reasoning_parser.py
DELETED
@@ -1,553 +0,0 @@
-import re
-from typing import Dict, Optional, Tuple, Type
-
-
-class StreamingParseResult:
-    """Result of streaming incremental parsing."""
-
-    def __init__(self, normal_text: str = "", reasoning_text: str = ""):
-        self.normal_text = normal_text
-        self.reasoning_text = reasoning_text
-
-
-class BaseReasoningFormatDetector:
-    """Base class providing two sets of interfaces: one-time and streaming incremental."""
-
-    def __init__(
-        self,
-        think_start_token: str,
-        think_end_token: str,
-        force_reasoning: bool = False,
-        stream_reasoning: bool = True,
-    ):
-        self.think_start_token = think_start_token
-        self.think_end_token = think_end_token
-        self._in_reasoning = force_reasoning
-        self.stream_reasoning = stream_reasoning
-
-        self._buffer = ""
-        self.stripped_think_start = False
-
-    def detect_and_parse(self, text: str) -> StreamingParseResult:
-        """
-        One-time parsing: Detects and parses reasoning sections in the provided text.
-        Returns both reasoning content and normal text separately.
-        """
-        in_reasoning = self._in_reasoning or self.think_start_token in text
-
-        if not in_reasoning:
-            return StreamingParseResult(normal_text=text)
-
-        # The text is considered to be in a reasoning block.
-        processed_text = text.replace(self.think_start_token, "").strip()
-
-        if self.think_end_token not in processed_text:
-            # Assume reasoning was truncated before the `</think>` token
-            return StreamingParseResult(reasoning_text=processed_text)
-
-        # Extract reasoning content
-        splits = processed_text.split(self.think_end_token, maxsplit=1)
-        reasoning_text = splits[0]
-        normal_text = splits[1].strip()
-
-        return StreamingParseResult(
-            normal_text=normal_text, reasoning_text=reasoning_text
-        )
-
-    def parse_streaming_increment(self, new_text: str) -> StreamingParseResult:
-        """
-        Streaming incremental parsing for reasoning content.
-        Handles partial reasoning tags and content.
-
-        If stream_reasoning is False:
-            Accumulates reasoning content until the end tag is found.
-        If stream_reasoning is True:
-            Streams reasoning content as it arrives.
-        """
-        self._buffer += new_text
-        current_text = self._buffer
-
-        # If the current text is a prefix of a think token, keep buffering
-        if any(
-            token.startswith(current_text) and token != current_text
-            for token in [self.think_start_token, self.think_end_token]
-        ):
-            return StreamingParseResult()
-
-        # Strip `<think>` token if present
-        if not self.stripped_think_start and self.think_start_token in current_text:
-            current_text = current_text.replace(self.think_start_token, "")
-            self.stripped_think_start = True
-            self._in_reasoning = True
-
-        # Handle end of reasoning block
-        if self._in_reasoning and self.think_end_token in current_text:
-            end_idx = current_text.find(self.think_end_token)
-
-            reasoning_text = current_text[:end_idx]
-
-            self._buffer = ""
-            self._in_reasoning = False
-            normal_text = current_text[end_idx + len(self.think_end_token) :]
-
-            return StreamingParseResult(
-                normal_text=normal_text, reasoning_text=reasoning_text.rstrip()
-            )
-
-        # Continue with reasoning content
-        if self._in_reasoning:
-            if self.stream_reasoning:
-                # Stream the content immediately
-                self._buffer = ""
-                return StreamingParseResult(reasoning_text=current_text)
-            else:
-                return StreamingParseResult()
-
-        # If we're not in a reasoning block, return as normal text
-        if not self._in_reasoning:
-            self._buffer = ""
-            return StreamingParseResult(normal_text=current_text)
-
-        return StreamingParseResult()
-
-
-class DeepSeekR1Detector(BaseReasoningFormatDetector):
-    """
-    Detector for DeepSeek-R1 models.
-    Assumes reasoning format:
-        (<think>)*(.*)</think>
-    Returns all the text before the </think> tag as `reasoning_text`
-    and the rest of the text as `normal_text`.
-
-    Supported models:
-    - DeepSeek-R1: Always generates thinking content without a <think> start tag
-    - DeepSeek-R1-0528: Generates thinking content with a <think> start tag
-
-    Format patterns:
-    - DeepSeek-R1: "I need to think about this...</think>The answer is 42."
-    - DeepSeek-R1-0528: "<think>I need to think about this...</think>The answer is 42."
-
-    Args:
-        stream_reasoning (bool): If False, accumulates reasoning content until the end tag.
-            If True, streams reasoning content as it arrives.
-    """
-
-    def __init__(self, stream_reasoning: bool = True, force_reasoning: bool = True):
-        # DeepSeek-R1 is assumed to be reasoning until the `</think>` token
-        super().__init__(
-            "<think>",
-            "</think>",
-            force_reasoning=True,
-            stream_reasoning=stream_reasoning,
-        )
-        # https://github.com/sgl-project/sglang/pull/3202#discussion_r1950153599
-
-
-class Qwen3Detector(BaseReasoningFormatDetector):
-    """
-    Detector for Qwen3 models (e.g., Qwen/Qwen3-235B-A22B).
-    Assumes reasoning format:
-        (<think>)*(.*)</think>
-
-    Qwen3 models released before 07/2025 support switching between thinking mode
-    and normal mode via the `enable_thinking` request parameter.
-    - enable_thinking=True: "<think>reasoning content</think>The answer is 42."
-    - enable_thinking=False: "The answer is 42." (no thinking tokens)
-
-    Args:
-        stream_reasoning (bool): If False, accumulates reasoning content until the end tag.
-            If True, streams reasoning content as it arrives.
-    """
-
-    def __init__(self, stream_reasoning: bool = True, force_reasoning: bool = False):
-        super().__init__(
-            "<think>",
-            "</think>",
-            force_reasoning=force_reasoning,
-            stream_reasoning=stream_reasoning,
-        )
-
-
-class KimiDetector(BaseReasoningFormatDetector):
-    """
-    Detector for the Kimi Thinking model.
-    Assumes reasoning format:
-        ◁think▷*(.*)◁/think▷
-    Returns all the text before the ◁/think▷ tag as `reasoning_text`
-    and the rest of the text as `normal_text`.
-    """
-
-    def __init__(self, stream_reasoning: bool = True, force_reasoning: bool = False):
-        super().__init__(
-            "◁think▷",
-            "◁/think▷",
-            force_reasoning=False,
-            stream_reasoning=stream_reasoning,
-        )
-
-
-class GptOssDetector(BaseReasoningFormatDetector):
-    """
-    Detector for T4-style reasoning format.
-
-    Assumes reasoning format with two channels:
-        <|channel|>analysis<|message|>...reasoning content...<|end|>
-        <|start|>assistant<|channel|>final<|message|>...final answer...<|return|>
-
-    Returns content from the 'analysis' channel as reasoning_text
-    and content from the 'final' channel as normal_text.
-
-    Args:
-        stream_reasoning (bool): If False, accumulates reasoning content until complete.
-            If True, streams reasoning content as it arrives.
-    """
-
-    def __init__(self, stream_reasoning: bool = True, force_reasoning: bool = True):
-        # GPT-OSS uses channel tokens instead of simple start/end tokens
-        super().__init__(
-            "<|channel|>analysis<|message|>",
-            "<|end|>",
-            force_reasoning=True,
-            stream_reasoning=stream_reasoning,
-        )
-        self.final_channel_start = "<|start|>assistant<|channel|>final<|message|>"
-        self.final_channel_end = "<|return|>"
-        self._in_final_channel = False
-        self._analysis_complete = False
-        self._in_reasoning = True
-
-    def detect_and_parse(self, text: str) -> StreamingParseResult:
-        """
-        One-time parsing: Detects and parses both analysis and final channels.
-        Tool call channels are preserved in normal_text for downstream processing.
-
-        HACK: Also handles simplified format where text starts with "analysis" and
-        transitions to "assistantfinal" without full channel markers.
-        """
-        # HACK: Handle simplified format (analysis...assistantfinal) without channel markers
-        if (
-            text.startswith("analysis")
-            and "assistantfinal" in text
-            and "<|channel|>" not in text
-        ):
-            # Split on "assistantfinal"
-            parts = text.split("assistantfinal", 1)
-            self._in_reasoning = False
-            if len(parts) == 2:
-                reasoning_text = parts[0][
-                    len("analysis") :
-                ].strip()  # Remove "analysis" prefix
-                normal_text = parts[1].strip()
-                return StreamingParseResult(
-                    normal_text=normal_text, reasoning_text=reasoning_text
-                )
-
-        reasoning_parts = []
-        normal_parts = []
-        current_pos = 0
-
-        # Process text sequentially to preserve tool calls between analysis sections
-        while current_pos < len(text):
-            # Look for next analysis channel
-            analysis_start_idx = text.find(self.think_start_token, current_pos)
-
-            if analysis_start_idx == -1:
-                # No more analysis channels, rest goes to remaining
-                break
-
-            # Preserve any content before this analysis channel (could include tool calls)
-            if analysis_start_idx > current_pos:
-                between_content = text[current_pos:analysis_start_idx]
-                # This content will be added to normal_parts later
-                normal_parts.append(between_content)
-
-            # Extract analysis content
-            analysis_content_start = analysis_start_idx + len(self.think_start_token)
-            analysis_end_idx = text.find(self.think_end_token, analysis_content_start)
-
-            if analysis_end_idx != -1:
-                reasoning_parts.append(
-                    text[analysis_content_start:analysis_end_idx].strip()
-                )
-                current_pos = analysis_end_idx + len(self.think_end_token)
-            else:
-                # Analysis not complete
-                reasoning_parts.append(text[analysis_content_start:].strip())
-                reasoning_text = "".join(reasoning_parts)
-                return StreamingParseResult(reasoning_text=reasoning_text)
-
-        # Add any remaining text after all analysis sections
-        if current_pos < len(text):
-            remaining = text[current_pos:]
-            normal_parts.append(remaining)
-
-        # Process non-analysis content for commentary sections
-        full_normal_text = "".join(normal_parts)
-
-        # Extract reasoning from non-tool-call commentary sections
-        # Tool calls have "to=" in their header, regular commentary does not
-        commentary_pattern = re.compile(
-            r"<\|start\|>assistant<\|channel\|>commentary<\|message\|>(.*?)(?:<\|end\|>|<\|call\|>)",
-            re.DOTALL,
-        )
-
-        cleaned_text = full_normal_text
-        for match in reversed(list(commentary_pattern.finditer(full_normal_text))):
-            # Check if this commentary is a tool call by looking at the text before <|message|>
-            match_start = match.start()
-            # Find where "<|channel|>commentary" starts within the matched pattern
-            # The pattern starts with "<|start|>assistant<|channel|>commentary"
-            # So we look for the text between "commentary" and "<|message|>" in the match
-            match_text = full_normal_text[match_start : match.end()]
-            commentary_idx = match_text.find("<|channel|>commentary")
-            if commentary_idx != -1:
-                message_idx = match_text.find("<|message|>", commentary_idx)
-                if message_idx != -1:
-                    between_text = match_text[commentary_idx:message_idx]
-                    # If no "to=" found, this is regular commentary (reasoning content)
-                    if " to=" not in between_text:
-                        content = match.group(1).strip()
-                        reasoning_parts.append(content)
-                        # Remove this commentary section from normal text
-                        cleaned_text = (
-                            cleaned_text[: match.start()] + cleaned_text[match.end() :]
-                        )
-
-        full_normal_text = cleaned_text
-
-        # Combine all reasoning parts
-        reasoning_text = "".join(reasoning_parts)
-
-        # Process full_normal_text for final output
-        normal_text = ""
-        if self.final_channel_start in full_normal_text:
-            final_start = full_normal_text.find(self.final_channel_start)
-            final_content_start = final_start + len(self.final_channel_start)
-            final_end = full_normal_text.find(
-                self.final_channel_end, final_content_start
-            )
-
-            if final_end != -1:
-                # Extract content before final channel (includes tool calls)
-                before_final = full_normal_text[:final_start].strip()
-                # Extract ONLY the final channel content (not the channel markers)
-                final_text = full_normal_text[final_content_start:final_end].strip()
-                # Extract content after final channel
-                after_final = full_normal_text[
-                    final_end + len(self.final_channel_end) :
-                ].strip()
-
-                # For tool calls + final answer: concatenate tool calls with final text
-                parts = []
-                if before_final:
-                    parts.append(before_final)
-                if final_text:
-                    parts.append(final_text)
-                if after_final:
-                    parts.append(after_final)
-                normal_text = " ".join(parts)
-            else:
-                # Final channel not complete - extract what we have
-                # Look for just <|channel|>final<|message|> without <|return|>
-                alt_final_start = full_normal_text.find("<|channel|>final<|message|>")
-                if alt_final_start != -1:
-                    before_alt_final = full_normal_text[:alt_final_start].strip()
-                    alt_final_content = full_normal_text[
-                        alt_final_start + len("<|channel|>final<|message|>") :
-                    ].strip()
-
-                    parts = []
-                    if before_alt_final:
-                        parts.append(before_alt_final)
-                    if alt_final_content:
-                        parts.append(alt_final_content)
-                    normal_text = " ".join(parts)
-                else:
-                    normal_text = full_normal_text.strip()
-        else:
-            # No final channel, treat all as normal text (includes tool calls)
-            normal_text = full_normal_text.strip()
-
-        return StreamingParseResult(
-            normal_text=normal_text, reasoning_text=reasoning_text
-        )
-
-    def parse_streaming_increment(self, new_text: str) -> StreamingParseResult:
-        """
-        Streaming incremental parsing for GPT-OSS format.
-
-        This is a simplified streaming implementation that accumulates content
-        and delegates to the non-streaming parser for complex multi-channel parsing.
-        TODO: Implement proper incremental parsing for better streaming performance.
-        """
-        self._buffer += new_text
-
-        if not self._in_reasoning:
-            return StreamingParseResult(normal_text=new_text)
-
-        # Check if we have complete sections to process
-        # For GPT-OSS, we need to wait for complete channel sections
-        # HACK: For now, use simplified approach - wait for key markers before processing
-        key_markers = ["<|end|>", "<|call|>", "<|return|>", "assistantfinal"]
-        has_complete_section = any(marker in self._buffer for marker in key_markers)
-
-        if not has_complete_section:
-            # Still accumulating, don't process yet
-            return StreamingParseResult()
-
-        # Handle simplified format (analysis...assistantfinal) with true incremental streaming
-        if (
-            "<|channel|>" not in self._buffer
-        ):  # Simplified format without channel markers
-            if self._buffer.startswith("analysis"):
-                # Check if we have the transition to assistantfinal
-                if "assistantfinal" in self._buffer:
-                    self._in_reasoning = False
-                    # Complete reasoning section - extract and stream it
-                    parts = self._buffer.split("assistantfinal", 1)
-                    reasoning_text = parts[0][len("analysis") :].strip()
-                    final_content = parts[1].strip()
-
-                    # Clear buffer and return both reasoning and final content
-                    self._buffer = ""
-                    return StreamingParseResult(
-                        reasoning_text=reasoning_text if self.stream_reasoning else "",
-                        normal_text=final_content,
-                    )
-                elif self.stream_reasoning:
-                    # Stream reasoning content incrementally as it arrives
-                    current_reasoning = self._buffer[len("analysis") :].strip()
-                    self._buffer = ""
-                    return StreamingParseResult(reasoning_text=current_reasoning)
-                else:
-                    # Wait for assistantfinal
-                    return StreamingParseResult()
-            elif self._buffer.startswith("assistantfinal"):
-                # Direct final content without analysis
-                final_content = self._buffer[len("assistantfinal") :].strip()
-                self._buffer = ""
-                return StreamingParseResult(normal_text=final_content)
-
-        # For full channel format, process sections as they complete
-        result = StreamingParseResult()
-
-        # Process complete analysis sections
-        while (
-            self.think_start_token in self._buffer
-            and self.think_end_token in self._buffer
-        ):
-            start_idx = self._buffer.find(self.think_start_token)
-            start_pos = start_idx + len(self.think_start_token)
-            end_pos = self._buffer.find(self.think_end_token, start_pos)
-
-            if end_pos != -1:
-                reasoning_content = self._buffer[start_pos:end_pos].strip()
-                if self.stream_reasoning and reasoning_content:
-                    result.reasoning_text += reasoning_content
-
-                # Remove processed analysis section
-                self._buffer = (
-                    self._buffer[:start_idx]
-                    + self._buffer[end_pos + len(self.think_end_token) :]
-                )
-            else:
-                break
-
-        # Process complete commentary sections
-        commentary_pattern = re.compile(
-            r"<\|start\|>assistant<\|channel\|>commentary<\|message\|>(.*?)(?:<\|end\|>|<\|call\|>)",
-            re.DOTALL,
-        )
-
-        for match in reversed(list(commentary_pattern.finditer(self._buffer))):
-            # Check if this is a tool call
-            start_pos = match.start()
-            commentary_content = match.group(1).strip()
-            if self.stream_reasoning and commentary_content:
-                result.reasoning_text += commentary_content
-
-            # Remove this commentary section
-            self._buffer = self._buffer[: match.start()] + self._buffer[match.end() :]
-            # Clean up any standalone <|start|>assistant
-            self._buffer = re.sub(
-                r"<\|start\|>assistant(?=<\|start\|>assistant)", "", self._buffer
-            )
-
-        # Handle final channel completion
-        if self.final_channel_start in self._buffer:
-            final_start = self._buffer.find(self.final_channel_start)
-            final_content_start = final_start + len(self.final_channel_start)
-
-            # Check if final channel is complete
-            final_end = self._buffer.find(self.final_channel_end, final_content_start)
-            if final_end != -1:
-                # Complete final channel - process everything
-                final_result = self.detect_and_parse(self._buffer)
-                self._buffer = ""
-                return StreamingParseResult(
-                    normal_text=final_result.normal_text,
-                    reasoning_text=result.reasoning_text + final_result.reasoning_text,
-                )
-            else:
-                # Extract content before final channel (e.g. tool calls)
-                before_final = self._buffer[:final_start]
-                if before_final:
-                    # Output tool calls for processing
-                    result.normal_text += before_final
-                    # Keep the final channel part in buffer
-                    self._buffer = self._buffer[final_start:]
-
-        return result
-
-
-class ReasoningParser:
-    """
-    Parser that handles both streaming and non-streaming scenarios for extracting
-    reasoning content from model outputs.
-
-    Args:
-        model_type (str): Type of model to parse reasoning from
-        stream_reasoning (bool): If False, accumulates reasoning content until complete.
-            If True, streams reasoning content as it arrives.
-    """
-
-    DetectorMap: Dict[str, Type[BaseReasoningFormatDetector]] = {
-        "deepseek-r1": DeepSeekR1Detector,
-        "deepseek-v3": Qwen3Detector,
-        "glm45": Qwen3Detector,
-        "gpt-oss": GptOssDetector,
-        "kimi": KimiDetector,
-        "qwen3": Qwen3Detector,
-        "qwen3-thinking": Qwen3Detector,
-        "step3": DeepSeekR1Detector,
-    }
-
-    def __init__(
-        self,
-        model_type: Optional[str] = None,
-        stream_reasoning: bool = True,
-        force_reasoning: bool = False,
-    ):
-        if not model_type:
-            raise ValueError("Model type must be specified")
-
-        detector_class = self.DetectorMap.get(model_type.lower())
-        if not detector_class:
-            raise ValueError(f"Unsupported model type: {model_type}")
-
-        if model_type.lower() == "qwen3-thinking":
-            force_reasoning = True
-
-        self.detector = detector_class(
-            stream_reasoning=stream_reasoning, force_reasoning=force_reasoning
-        )
-
-    def parse_non_stream(self, full_text: str) -> Tuple[str, str]:
-        """Non-streaming call: one-time parsing"""
-        ret = self.detector.detect_and_parse(full_text)
-        return ret.reasoning_text, ret.normal_text
-
-    def parse_stream_chunk(self, chunk_text: str) -> Tuple[str, str]:
-        """Streaming call: incremental parsing"""
-        ret = self.detector.parse_streaming_increment(chunk_text)
-        return ret.reasoning_text, ret.normal_text
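
Note that the parser was relocated rather than dropped: the file list above adds sglang/srt/parser/reasoning_parser.py (+309 lines) in 0.5.2, alongside the moves of conversation.py, code_completion_parser.py, and jinja_template_utils.py into sglang/srt/parser/. Below is a minimal usage sketch of the public API as it appears in the deleted source, under the assumption that the relocated module keeps the same ReasoningParser interface and only the import path changes for downstream code.

# Minimal sketch exercising ReasoningParser as defined in the deleted source.
# Assumption: the 0.5.2 replacement at sglang/srt/parser/reasoning_parser.py
# exposes the same interface; previously this import was
# `from sglang.srt.reasoning_parser import ReasoningParser`.
from sglang.srt.parser.reasoning_parser import ReasoningParser

# Non-streaming: split a completed generation into (reasoning, answer).
parser = ReasoningParser(model_type="deepseek-r1")
reasoning, answer = parser.parse_non_stream(
    "I need to think about this...</think>The answer is 42."
)
# reasoning -> "I need to think about this..."
# answer    -> "The answer is 42."

# Streaming: feed decoded chunks as they arrive; each call returns whatever
# reasoning/normal text became available with that chunk.
streaming = ReasoningParser(model_type="qwen3", stream_reasoning=True)
for chunk in ["<think>step 1,", " step 2</think>", "The answer is 42."]:
    reasoning_piece, normal_piece = streaming.parse_stream_chunk(chunk)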