sglang 0.4.7.post1__py3-none-any.whl → 0.4.8.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. sglang/bench_one_batch.py +8 -6
  2. sglang/srt/_custom_ops.py +2 -2
  3. sglang/srt/code_completion_parser.py +2 -44
  4. sglang/srt/configs/model_config.py +1 -0
  5. sglang/srt/constants.py +3 -0
  6. sglang/srt/conversation.py +14 -3
  7. sglang/srt/custom_op.py +11 -1
  8. sglang/srt/disaggregation/base/conn.py +2 -0
  9. sglang/srt/disaggregation/decode.py +22 -28
  10. sglang/srt/disaggregation/decode_schedule_batch_mixin.py +4 -3
  11. sglang/srt/disaggregation/mini_lb.py +34 -4
  12. sglang/srt/disaggregation/mooncake/conn.py +301 -64
  13. sglang/srt/disaggregation/mooncake/transfer_engine.py +31 -1
  14. sglang/srt/disaggregation/nixl/conn.py +94 -46
  15. sglang/srt/disaggregation/prefill.py +20 -15
  16. sglang/srt/disaggregation/utils.py +47 -18
  17. sglang/srt/distributed/parallel_state.py +12 -4
  18. sglang/srt/entrypoints/engine.py +27 -31
  19. sglang/srt/entrypoints/http_server.py +149 -79
  20. sglang/srt/entrypoints/http_server_engine.py +0 -3
  21. sglang/srt/entrypoints/openai/__init__.py +0 -0
  22. sglang/srt/{openai_api → entrypoints/openai}/protocol.py +115 -34
  23. sglang/srt/entrypoints/openai/serving_base.py +149 -0
  24. sglang/srt/entrypoints/openai/serving_chat.py +897 -0
  25. sglang/srt/entrypoints/openai/serving_completions.py +425 -0
  26. sglang/srt/entrypoints/openai/serving_embedding.py +170 -0
  27. sglang/srt/entrypoints/openai/serving_rerank.py +102 -0
  28. sglang/srt/entrypoints/openai/serving_score.py +61 -0
  29. sglang/srt/entrypoints/openai/usage_processor.py +81 -0
  30. sglang/srt/entrypoints/openai/utils.py +72 -0
  31. sglang/srt/function_call/base_format_detector.py +7 -4
  32. sglang/srt/function_call/deepseekv3_detector.py +1 -1
  33. sglang/srt/function_call/ebnf_composer.py +64 -10
  34. sglang/srt/function_call/function_call_parser.py +6 -6
  35. sglang/srt/function_call/llama32_detector.py +1 -1
  36. sglang/srt/function_call/mistral_detector.py +1 -1
  37. sglang/srt/function_call/pythonic_detector.py +1 -1
  38. sglang/srt/function_call/qwen25_detector.py +1 -1
  39. sglang/srt/{openai_api/utils.py → jinja_template_utils.py} +6 -5
  40. sglang/srt/layers/activation.py +28 -3
  41. sglang/srt/layers/attention/aiter_backend.py +5 -2
  42. sglang/srt/layers/attention/base_attn_backend.py +1 -1
  43. sglang/srt/layers/attention/cutlass_mla_backend.py +1 -0
  44. sglang/srt/layers/attention/flashattention_backend.py +43 -23
  45. sglang/srt/layers/attention/flashinfer_backend.py +9 -6
  46. sglang/srt/layers/attention/flashinfer_mla_backend.py +7 -4
  47. sglang/srt/layers/attention/flashmla_backend.py +5 -2
  48. sglang/srt/layers/attention/tbo_backend.py +3 -3
  49. sglang/srt/layers/attention/triton_backend.py +19 -11
  50. sglang/srt/layers/communicator.py +5 -5
  51. sglang/srt/layers/dp_attention.py +11 -2
  52. sglang/srt/layers/layernorm.py +44 -2
  53. sglang/srt/layers/linear.py +18 -1
  54. sglang/srt/layers/logits_processor.py +14 -5
  55. sglang/srt/layers/moe/ep_moe/kernels.py +159 -2
  56. sglang/srt/layers/moe/ep_moe/layer.py +286 -13
  57. sglang/srt/layers/moe/ep_moe/token_dispatcher.py +19 -2
  58. sglang/srt/layers/moe/fused_moe_native.py +7 -0
  59. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
  60. sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +13 -2
  61. sglang/srt/layers/moe/fused_moe_triton/layer.py +148 -26
  62. sglang/srt/layers/moe/topk.py +117 -4
  63. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +6 -2
  64. sglang/srt/layers/quantization/fp8.py +25 -17
  65. sglang/srt/layers/quantization/fp8_utils.py +5 -4
  66. sglang/srt/layers/quantization/modelopt_quant.py +62 -8
  67. sglang/srt/layers/quantization/utils.py +5 -2
  68. sglang/srt/layers/rotary_embedding.py +144 -12
  69. sglang/srt/layers/sampler.py +1 -1
  70. sglang/srt/layers/vocab_parallel_embedding.py +14 -1
  71. sglang/srt/lora/lora_manager.py +173 -74
  72. sglang/srt/lora/mem_pool.py +49 -45
  73. sglang/srt/lora/utils.py +1 -1
  74. sglang/srt/managers/cache_controller.py +33 -15
  75. sglang/srt/managers/expert_distribution.py +21 -0
  76. sglang/srt/managers/io_struct.py +19 -14
  77. sglang/srt/managers/multimodal_processors/base_processor.py +44 -9
  78. sglang/srt/managers/multimodal_processors/gemma3n.py +97 -0
  79. sglang/srt/managers/schedule_batch.py +49 -32
  80. sglang/srt/managers/schedule_policy.py +70 -56
  81. sglang/srt/managers/scheduler.py +189 -68
  82. sglang/srt/managers/template_manager.py +226 -0
  83. sglang/srt/managers/tokenizer_manager.py +11 -8
  84. sglang/srt/managers/tp_worker.py +12 -2
  85. sglang/srt/managers/tp_worker_overlap_thread.py +11 -0
  86. sglang/srt/mem_cache/{paged_allocator.py → allocator.py} +125 -34
  87. sglang/srt/mem_cache/base_prefix_cache.py +52 -8
  88. sglang/srt/mem_cache/chunk_cache.py +11 -16
  89. sglang/srt/mem_cache/hiradix_cache.py +34 -23
  90. sglang/srt/mem_cache/memory_pool.py +118 -114
  91. sglang/srt/mem_cache/radix_cache.py +20 -16
  92. sglang/srt/model_executor/cuda_graph_runner.py +77 -46
  93. sglang/srt/model_executor/forward_batch_info.py +18 -5
  94. sglang/srt/model_executor/model_runner.py +27 -8
  95. sglang/srt/model_loader/loader.py +50 -8
  96. sglang/srt/model_loader/weight_utils.py +100 -2
  97. sglang/srt/models/deepseek_nextn.py +35 -30
  98. sglang/srt/models/deepseek_v2.py +255 -30
  99. sglang/srt/models/gemma3n_audio.py +949 -0
  100. sglang/srt/models/gemma3n_causal.py +1009 -0
  101. sglang/srt/models/gemma3n_mm.py +511 -0
  102. sglang/srt/models/glm4.py +312 -0
  103. sglang/srt/models/hunyuan.py +771 -0
  104. sglang/srt/models/mimo_mtp.py +2 -18
  105. sglang/srt/reasoning_parser.py +21 -11
  106. sglang/srt/server_args.py +51 -9
  107. sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +131 -10
  108. sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +125 -12
  109. sglang/srt/speculative/eagle_utils.py +80 -8
  110. sglang/srt/speculative/eagle_worker.py +124 -41
  111. sglang/srt/torch_memory_saver_adapter.py +19 -15
  112. sglang/srt/two_batch_overlap.py +4 -1
  113. sglang/srt/utils.py +248 -11
  114. sglang/test/test_block_fp8_ep.py +1 -0
  115. sglang/test/test_utils.py +1 -0
  116. sglang/version.py +1 -1
  117. {sglang-0.4.7.post1.dist-info → sglang-0.4.8.post1.dist-info}/METADATA +4 -10
  118. {sglang-0.4.7.post1.dist-info → sglang-0.4.8.post1.dist-info}/RECORD +121 -105
  119. sglang/srt/entrypoints/verl_engine.py +0 -179
  120. sglang/srt/openai_api/adapter.py +0 -2148
  121. {sglang-0.4.7.post1.dist-info → sglang-0.4.8.post1.dist-info}/WHEEL +0 -0
  122. {sglang-0.4.7.post1.dist-info → sglang-0.4.8.post1.dist-info}/licenses/LICENSE +0 -0
  123. {sglang-0.4.7.post1.dist-info → sglang-0.4.8.post1.dist-info}/top_level.txt +0 -0
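The headline change in this release is the removal of the monolithic OpenAI adapter (sglang/srt/openai_api/adapter.py, -2148 lines) in favor of per-endpoint handlers under sglang/srt/entrypoints/openai/. The largest new handler, serving_chat.py (+897 lines), is reproduced in full below. As a minimal client-side sketch of how the new /v1/chat/completions handler is exercised (illustrative only, not part of the diff; it assumes an sglang server is already running locally on the default port 30000, and the model name and api_key value are placeholders):

# Minimal client-side sketch (not part of the diff): exercises the new
# OpenAIServingChat non-streaming path (_handle_non_streaming_request /
# _build_chat_response). Assumes a local sglang server on port 30000;
# the model name and api_key value are placeholders.
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",  # whatever model the server was launched with
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize what a KV cache is in one sentence."},
    ],
    temperature=0.7,
    max_tokens=128,
)

print(response.choices[0].message.content)
print(response.usage)  # prompt/completion token counts assembled by UsageProcessor
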
sglang/srt/entrypoints/openai/serving_chat.py (new file)
@@ -0,0 +1,897 @@
+import copy
+import json
+import logging
+import time
+import uuid
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union
+
+from fastapi import Request
+from fastapi.responses import ORJSONResponse, StreamingResponse
+
+from sglang.srt.conversation import generate_chat_conv
+from sglang.srt.entrypoints.openai.protocol import (
+    ChatCompletionRequest,
+    ChatCompletionResponse,
+    ChatCompletionResponseChoice,
+    ChatCompletionResponseStreamChoice,
+    ChatCompletionStreamResponse,
+    ChatCompletionTokenLogprob,
+    ChatMessage,
+    ChoiceLogprobs,
+    DeltaMessage,
+    ErrorResponse,
+    FunctionResponse,
+    LogProbs,
+    MessageProcessingResult,
+    ToolCall,
+    TopLogprob,
+)
+from sglang.srt.entrypoints.openai.serving_base import OpenAIServingBase
+from sglang.srt.entrypoints.openai.usage_processor import UsageProcessor
+from sglang.srt.entrypoints.openai.utils import (
+    process_hidden_states_from_ret,
+    to_openai_style_logprobs,
+)
+from sglang.srt.function_call.function_call_parser import FunctionCallParser
+from sglang.srt.jinja_template_utils import process_content_for_template_format
+from sglang.srt.managers.io_struct import GenerateReqInput
+from sglang.srt.managers.template_manager import TemplateManager
+from sglang.srt.managers.tokenizer_manager import TokenizerManager
+from sglang.srt.reasoning_parser import ReasoningParser
+from sglang.utils import convert_json_schema_to_str
+
+logger = logging.getLogger(__name__)
+
+
+class OpenAIServingChat(OpenAIServingBase):
+    """Handler for /v1/chat/completions requests"""
+
+    def __init__(
+        self, tokenizer_manager: TokenizerManager, template_manager: TemplateManager
+    ):
+        super().__init__(tokenizer_manager)
+        self.template_manager = template_manager
+
+    def _request_id_prefix(self) -> str:
+        return "chatcmpl-"
+
+    def _convert_to_internal_request(
+        self,
+        request: ChatCompletionRequest,
+    ) -> tuple[GenerateReqInput, ChatCompletionRequest]:
+        """Convert OpenAI chat completion request to internal format"""
+        is_multimodal = self.tokenizer_manager.model_config.is_multimodal
+
+        # Process messages and apply chat template
+        processed_messages = self._process_messages(request, is_multimodal)
+
+        # Build sampling parameters
+        sampling_params = self._build_sampling_params(
+            request, processed_messages.stop, processed_messages.tool_call_constraint
+        )
+
+        # Handle single vs multiple requests
+        if is_multimodal:
+            prompt_kwargs = {"text": processed_messages.prompt}
+        else:
+            if isinstance(processed_messages.prompt_ids, str):
+                prompt_kwargs = {"text": processed_messages.prompt_ids}
+            else:
+                prompt_kwargs = {"input_ids": processed_messages.prompt_ids}
+
+        adapted_request = GenerateReqInput(
+            **prompt_kwargs,
+            image_data=processed_messages.image_data,
+            audio_data=processed_messages.audio_data,
+            sampling_params=sampling_params,
+            return_logprob=request.logprobs,
+            logprob_start_len=-1,
+            top_logprobs_num=request.top_logprobs or 0,
+            stream=request.stream,
+            return_text_in_logprobs=True,
+            modalities=processed_messages.modalities,
+            lora_path=request.lora_path,
+            bootstrap_host=request.bootstrap_host,
+            bootstrap_port=request.bootstrap_port,
+            bootstrap_room=request.bootstrap_room,
+            return_hidden_states=request.return_hidden_states,
+            rid=request.rid,
+        )
+
+        return adapted_request, request
+
+    def _process_messages(
+        self, request: ChatCompletionRequest, is_multimodal: bool
+    ) -> MessageProcessingResult:
+        """Process chat messages and apply chat template"""
+        tool_call_constraint = None
+
+        # Apply chat template and its stop strings
+        tools = None
+        if request.tools and request.tool_choice != "none":
+            request.skip_special_tokens = False
+            if not isinstance(request.tool_choice, str):
+                tools = [
+                    item.function.model_dump()
+                    for item in request.tools
+                    if item.function.name == request.tool_choice.function.name
+                ]
+            else:
+                tools = [item.function.model_dump() for item in request.tools]
+
+            tool_call_parser = self.tokenizer_manager.server_args.tool_call_parser
+            parser = FunctionCallParser(request.tools, tool_call_parser)
+            tool_call_constraint = parser.get_structure_constraint(request.tool_choice)
+
+        # Use chat template
+        if self.template_manager.chat_template_name is None:
+            result = self._apply_jinja_template(request, tools, is_multimodal)
+        else:
+            result = self._apply_conversation_template(request, is_multimodal)
+
+        result.tool_call_constraint = tool_call_constraint
+        return result
+
+    def _apply_jinja_template(
+        self,
+        request: ChatCompletionRequest,
+        tools: Optional[List[Dict]],
+        is_multimodal: bool,
+    ) -> MessageProcessingResult:
+        """Apply Jinja chat template"""
+        prompt = ""
+        prompt_ids = []
+        openai_compatible_messages = []
+        image_data = []
+        audio_data = []
+        modalities = []
+
+        template_content_format = self.template_manager.jinja_template_content_format
+
+        for message in request.messages:
+            if message.content is None:
+                message.content = ""
+            msg_dict = message.model_dump()
+
+            # Process content based on detected template format
+            processed_msg = process_content_for_template_format(
+                msg_dict,
+                template_content_format,
+                image_data,
+                audio_data,
+                modalities,
+            )
+            openai_compatible_messages.append(processed_msg)
+
+        # Handle assistant prefix for continue_final_message
+        assistant_prefix = None
+        if (
+            openai_compatible_messages
+            and openai_compatible_messages[-1]["role"] == "assistant"
+        ):
+            if request.continue_final_message:
+                assistant_prefix = openai_compatible_messages[-1]["content"]
+                openai_compatible_messages = openai_compatible_messages[:-1]
+
+        try:
+            prompt_ids = self.tokenizer_manager.tokenizer.apply_chat_template(
+                openai_compatible_messages,
+                tokenize=True,
+                add_generation_prompt=True,
+                tools=tools,
+                **(
+                    request.chat_template_kwargs if request.chat_template_kwargs else {}
+                ),
+            )
+        except Exception:
+            # This except branch will be triggered when the chosen model
+            # has a different tools input format that is not compatible
+            # with openAI's apply_chat_template tool_call format, like Mistral.
+            tools = (
+                [t if "function" in t else {"function": t} for t in tools]
+                if tools
+                else None
+            )
+            prompt_ids = self.tokenizer_manager.tokenizer.apply_chat_template(
+                openai_compatible_messages,
+                tokenize=True,
+                add_generation_prompt=True,
+                tools=tools,
+                **(
+                    request.chat_template_kwargs if request.chat_template_kwargs else {}
+                ),
+            )
+
+        if assistant_prefix:
+            encoded = self.tokenizer_manager.tokenizer.encode(assistant_prefix)
+            if encoded and encoded[0] == self.tokenizer_manager.tokenizer.bos_token_id:
+                encoded = encoded[1:]
+            prompt_ids += encoded
+
+        if is_multimodal:
+            prompt = self.tokenizer_manager.tokenizer.decode(prompt_ids)
+
+        stop = request.stop
+        image_data = image_data if image_data else None
+        audio_data = audio_data if audio_data else None
+        modalities = modalities if modalities else []
+        return MessageProcessingResult(
+            prompt=prompt,
+            prompt_ids=prompt_ids,
+            image_data=image_data,
+            audio_data=audio_data,
+            modalities=modalities,
+            stop=stop,
+        )
+
+    def _apply_conversation_template(
+        self,
+        request: ChatCompletionRequest,
+        is_multimodal: bool,
+    ) -> MessageProcessingResult:
+        """Apply conversation template"""
+        prompt = ""
+        prompt_ids = []
+        conv = generate_chat_conv(request, self.template_manager.chat_template_name)
+
+        # If we should continue the final assistant message, adjust the conversation.
+        if (
+            request.continue_final_message
+            and request.messages
+            and request.messages[-1].role == "assistant"
+        ):
+            # Remove the auto-added blank assistant turn, if present.
+            if conv.messages and conv.messages[-1][1] is None:
+                conv.messages.pop()
+            # Rebuild the prompt from the conversation.
+            prompt = conv.get_prompt()
+            # Strip trailing stop tokens or separators that indicate end-of-assistant.
+            if isinstance(conv.stop_str, list):
+                for stop_token in conv.stop_str:
+                    if prompt.endswith(stop_token):
+                        prompt = prompt[: -len(stop_token)]
+            elif isinstance(conv.stop_str, str) and prompt.endswith(conv.stop_str):
+                prompt = prompt[: -len(conv.stop_str)]
+            if conv.sep and prompt.endswith(conv.sep):
+                prompt = prompt[: -len(conv.sep)]
+            if getattr(conv, "sep2", None) and prompt.endswith(conv.sep2):
+                prompt = prompt[: -len(conv.sep2)]
+        else:
+            prompt = conv.get_prompt()
+
+        image_data = conv.image_data if conv.image_data else None
+        audio_data = conv.audio_data if conv.audio_data else None
+        modalities = conv.modalities if conv.modalities else []
+        stop = copy.copy(conv.stop_str or [] if not request.ignore_eos else [])
+
+        if request.stop:
+            if isinstance(request.stop, str):
+                stop.append(request.stop)
+            else:
+                stop.extend(request.stop)
+
+        if not is_multimodal:
+            prompt_ids = self.tokenizer_manager.tokenizer.encode(prompt)
+
+        return MessageProcessingResult(
+            prompt=prompt,
+            prompt_ids=prompt_ids,
+            image_data=image_data,
+            audio_data=audio_data,
+            modalities=modalities,
+            stop=stop,
+        )
+
+    def _build_sampling_params(
+        self,
+        request: ChatCompletionRequest,
+        stop: List[str],
+        tool_call_constraint: Optional[Any],
+    ) -> Dict[str, Any]:
+        """Build sampling parameters for the request"""
+
+        sampling_params = {
+            "temperature": request.temperature,
+            "max_new_tokens": request.max_tokens or request.max_completion_tokens,
+            "min_new_tokens": request.min_tokens,
+            "stop": stop,
+            "stop_token_ids": request.stop_token_ids,
+            "top_p": request.top_p,
+            "top_k": request.top_k,
+            "min_p": request.min_p,
+            "presence_penalty": request.presence_penalty,
+            "frequency_penalty": request.frequency_penalty,
+            "repetition_penalty": request.repetition_penalty,
+            "regex": request.regex,
+            "ebnf": request.ebnf,
+            "n": request.n,
+            "no_stop_trim": request.no_stop_trim,
+            "ignore_eos": request.ignore_eos,
+            "skip_special_tokens": request.skip_special_tokens,
+            "logit_bias": request.logit_bias,
+        }
+
+        if request.response_format and request.response_format.type == "json_schema":
+            sampling_params["json_schema"] = convert_json_schema_to_str(
+                request.response_format.json_schema.schema_
+            )
+        elif request.response_format and request.response_format.type == "json_object":
+            sampling_params["json_schema"] = '{"type": "object"}'
+        elif (
+            request.response_format and request.response_format.type == "structural_tag"
+        ):
+            sampling_params["structural_tag"] = convert_json_schema_to_str(
+                request.response_format.model_dump(by_alias=True)
+            )
+
+        # Check if there are already existing output constraints
+        has_existing_constraints = (
+            sampling_params.get("regex")
+            or sampling_params.get("ebnf")
+            or sampling_params.get("structural_tag")
+            or sampling_params.get("json_schema")
+        )
+
+        if tool_call_constraint and has_existing_constraints:
+            logger.warning("Constrained decoding is not compatible with tool calls.")
+        elif tool_call_constraint:
+            constraint_type, constraint_value = tool_call_constraint
+            if constraint_type == "structural_tag":
+                sampling_params[constraint_type] = convert_json_schema_to_str(
+                    constraint_value.model_dump(by_alias=True)
+                )
+            else:
+                sampling_params[constraint_type] = constraint_value
+        return sampling_params
+
+    async def _handle_streaming_request(
+        self,
+        adapted_request: GenerateReqInput,
+        request: ChatCompletionRequest,
+        raw_request: Request,
+    ) -> StreamingResponse:
+        """Handle streaming chat completion request"""
+        return StreamingResponse(
+            self._generate_chat_stream(adapted_request, request, raw_request),
+            media_type="text/event-stream",
+            background=self.tokenizer_manager.create_abort_task(adapted_request),
+        )
+
+    async def _generate_chat_stream(
+        self,
+        adapted_request: GenerateReqInput,
+        request: ChatCompletionRequest,
+        raw_request: Request,
+    ) -> AsyncGenerator[str, None]:
+        """Generate streaming chat completion response"""
+        # Parsers for tool calls and reasoning
+        parser_dict = {}
+        reasoning_parser_dict = {}
+
+        # State tracking for streaming
+        is_firsts = {}
+        stream_buffers = {}
+        n_prev_tokens = {}
+
+        # Usage tracking
+        prompt_tokens = {}
+        completion_tokens = {}
+        cached_tokens = {}
+        hidden_states = {}
+
+        try:
+            async for content in self.tokenizer_manager.generate_request(
+                adapted_request, raw_request
+            ):
+                index = content.get("index", 0)
+
+                prompt_tokens[index] = content["meta_info"]["prompt_tokens"]
+                completion_tokens[index] = content["meta_info"]["completion_tokens"]
+                cached_tokens[index] = content["meta_info"].get("cached_tokens", 0)
+                hidden_states[index] = content["meta_info"].get("hidden_states", None)
+
+                # Handle logprobs
+                choice_logprobs = None
+                if request.logprobs:
+                    choice_logprobs = self._process_streaming_logprobs(
+                        content, n_prev_tokens.get(index, 0)
+                    )
+                    n_prev_tokens[index] = len(
+                        content["meta_info"]["output_token_logprobs"]
+                    )
+
+                finish_reason = content["meta_info"]["finish_reason"]
+                finish_reason_type = finish_reason["type"] if finish_reason else None
+
+                # First chunk with role
+                if is_firsts.get(index, True):
+                    is_firsts[index] = False
+                    delta = DeltaMessage(role="assistant", content="")
+                    choice_data = ChatCompletionResponseStreamChoice(
+                        index=index,
+                        delta=delta,
+                        finish_reason=finish_reason_type,
+                        matched_stop=(
+                            finish_reason["matched"]
+                            if finish_reason and "matched" in finish_reason
+                            else None
+                        ),
+                        logprobs=choice_logprobs,
+                    )
+                    chunk = ChatCompletionStreamResponse(
+                        id=content["meta_info"]["id"],
+                        created=int(time.time()),
+                        choices=[choice_data],
+                        model=request.model,
+                    )
+                    yield f"data: {chunk.model_dump_json()}\n\n"
+
+                # Process content delta
+                stream_buffer = stream_buffers.get(index, "")
+                delta = content["text"][len(stream_buffer) :]
+                stream_buffers[index] = stream_buffer + delta
+
+                # Handle reasoning content
+                if (
+                    self.tokenizer_manager.server_args.reasoning_parser
+                    and request.separate_reasoning
+                ):
+                    reasoning_text, delta = self._process_reasoning_stream(
+                        index, delta, reasoning_parser_dict, content, request
+                    )
+                    if reasoning_text:
+                        choice_data = ChatCompletionResponseStreamChoice(
+                            index=index,
+                            delta=DeltaMessage(reasoning_content=reasoning_text),
+                            finish_reason=finish_reason_type,
+                        )
+                        chunk = ChatCompletionStreamResponse(
+                            id=content["meta_info"]["id"],
+                            created=int(time.time()),
+                            choices=[choice_data],
+                            model=request.model,
+                        )
+                        yield f"data: {chunk.model_dump_json()}\n\n"
+
+                if not delta:
+                    continue
+
+                # Handle tool calls
+                if request.tool_choice != "none" and request.tools:
+                    async for chunk in self._process_tool_call_stream(
+                        index,
+                        delta,
+                        parser_dict,
+                        content,
+                        request,
+                        finish_reason_type,
+                    ):
+                        yield chunk
+                else:
+                    # Regular content
+                    if delta or not (
+                        request.stream_options and request.stream_options.include_usage
+                    ):
+                        choice_data = ChatCompletionResponseStreamChoice(
+                            index=index,
+                            delta=DeltaMessage(content=delta if delta else None),
+                            finish_reason=(
+                                None
+                                if request.stream_options
+                                and request.stream_options.include_usage
+                                else finish_reason_type
+                            ),
+                            matched_stop=(
+                                finish_reason["matched"]
+                                if finish_reason and "matched" in finish_reason
+                                else None
+                            ),
+                            logprobs=choice_logprobs,
+                        )
+                        chunk = ChatCompletionStreamResponse(
+                            id=content["meta_info"]["id"],
+                            created=int(time.time()),
+                            choices=[choice_data],
+                            model=request.model,
+                        )
+                        yield f"data: {chunk.model_dump_json()}\n\n"
+
+            # Final chunk with finish_reason
+            finish_reason_chunk = ChatCompletionStreamResponse(
+                id=content["meta_info"]["id"],
+                created=int(time.time()),
+                choices=[
+                    ChatCompletionResponseStreamChoice(
+                        index=index,
+                        delta=DeltaMessage(),
+                        finish_reason=finish_reason_type,
+                        matched_stop=(
+                            finish_reason["matched"]
+                            if finish_reason and "matched" in finish_reason
+                            else None
+                        ),
+                    )
+                ],
+                model=request.model,
+                usage=None,
+            )
+            yield f"data: {finish_reason_chunk.model_dump_json()}\n\n"
+
+            # Send hidden states if requested
+            if request.return_hidden_states and hidden_states:
+                for index, choice_hidden_states in hidden_states.items():
+                    if choice_hidden_states:
+                        last_token_hidden_states = (
+                            choice_hidden_states[-1]
+                            if len(choice_hidden_states) > 1
+                            else []
+                        )
+                        hidden_states_chunk = ChatCompletionStreamResponse(
+                            id=content["meta_info"]["id"],
+                            created=int(time.time()),
+                            choices=[
+                                ChatCompletionResponseStreamChoice(
+                                    index=index,
+                                    delta=DeltaMessage(
+                                        hidden_states=last_token_hidden_states
+                                    ),
+                                    finish_reason=finish_reason_type,
+                                )
+                            ],
+                            model=request.model,
+                        )
+                        yield f"data: {hidden_states_chunk.model_dump_json()}\n\n"
+
+            # Additional usage chunk
+            if request.stream_options and request.stream_options.include_usage:
+                usage = UsageProcessor.calculate_streaming_usage(
+                    prompt_tokens,
+                    completion_tokens,
+                    cached_tokens,
+                    n_choices=request.n,
+                    enable_cache_report=self.tokenizer_manager.server_args.enable_cache_report,
+                )
+                usage_chunk = ChatCompletionStreamResponse(
+                    id=content["meta_info"]["id"],
+                    created=int(time.time()),
+                    choices=[],  # Empty choices array as per OpenAI spec
+                    model=request.model,
+                    usage=usage,
+                )
+                yield f"data: {usage_chunk.model_dump_json()}\n\n"
+
+        except ValueError as e:
+            error = self.create_streaming_error_response(str(e))
+            yield f"data: {error}\n\n"
+
+        yield "data: [DONE]\n\n"
+
+    async def _handle_non_streaming_request(
+        self,
+        adapted_request: GenerateReqInput,
+        request: ChatCompletionRequest,
+        raw_request: Request,
+    ) -> Union[ChatCompletionResponse, ErrorResponse, ORJSONResponse]:
+        """Handle non-streaming chat completion request"""
+        try:
+            ret = await self.tokenizer_manager.generate_request(
+                adapted_request, raw_request
+            ).__anext__()
+        except ValueError as e:
+            return self.create_error_response(str(e))
+
+        if not isinstance(ret, list):
+            ret = [ret]
+
+        response = self._build_chat_response(
+            request,
+            ret,
+            int(time.time()),
+        )
+
+        return response
+
+    def _build_chat_response(
+        self,
+        request: ChatCompletionRequest,
+        ret: List[Dict[str, Any]],
+        created: int,
+    ) -> Union[ChatCompletionResponse, ORJSONResponse]:
+        """Build chat completion response from generation results"""
+        choices = []
+
+        for idx, ret_item in enumerate(ret):
+            # Process logprobs
+            choice_logprobs = None
+            if request.logprobs:
+                choice_logprobs = self._process_response_logprobs(ret_item)
+
+            # Handle hidden states
+            hidden_states = process_hidden_states_from_ret(ret_item, request)
+
+            finish_reason = ret_item["meta_info"]["finish_reason"]
+            text = ret_item["text"]
+
+            # Handle reasoning content
+            reasoning_text = None
+            reasoning_parser = self.tokenizer_manager.server_args.reasoning_parser
+            if reasoning_parser and request.separate_reasoning:
+                try:
+                    parser = ReasoningParser(
+                        model_type=reasoning_parser, stream_reasoning=False
+                    )
+                    reasoning_text, text = parser.parse_non_stream(text)
+                except Exception as e:
+                    logger.error(f"Reasoning parsing error: {e}")
+                    return self.create_error_response(
+                        "Failed to parse reasoning content",
+                        err_type="InternalServerError",
+                        status_code=500,
+                    )
+
+            # Handle tool calls
+            tool_calls = None
+            if request.tool_choice != "none" and request.tools:
+                tool_call_parser = self.tokenizer_manager.server_args.tool_call_parser
+                tool_calls, text, finish_reason = self._process_tool_calls(
+                    text, request.tools, tool_call_parser, finish_reason
+                )
+
+            choice_data = ChatCompletionResponseChoice(
+                index=idx,
+                message=ChatMessage(
+                    role="assistant",
+                    content=text if text else None,
+                    tool_calls=tool_calls,
+                    reasoning_content=reasoning_text if reasoning_text else None,
+                ),
+                logprobs=choice_logprobs,
+                finish_reason=finish_reason["type"] if finish_reason else None,
+                matched_stop=(
+                    finish_reason["matched"]
+                    if finish_reason and "matched" in finish_reason
+                    else None
+                ),
+                hidden_states=hidden_states,
+            )
+            choices.append(choice_data)
+
+        # Calculate usage
+        usage = UsageProcessor.calculate_response_usage(
+            ret,
+            n_choices=request.n,
+            enable_cache_report=self.tokenizer_manager.server_args.enable_cache_report,
+        )
+
+        return ChatCompletionResponse(
+            id=ret[0]["meta_info"]["id"],
+            created=created,
+            model=request.model,
+            choices=choices,
+            usage=usage,
+        )
+
+    def _process_logprobs_tokens(
+        self, logprobs: LogProbs, use_token_index: bool = False
+    ) -> List[ChatCompletionTokenLogprob]:
+        """Common helper to process logprobs tokens for both streaming and non-streaming
+
+        Args:
+            logprobs: LogProbs data from model
+            use_token_index: True for non-streaming (use token_idx), False for streaming (use index 0)
+        """
+        token_logprobs = []
+
+        for token_idx, (token, logprob) in enumerate(
+            zip(logprobs.tokens, logprobs.token_logprobs)
+        ):
+            token_bytes = list(token.encode("utf-8"))
+            top_logprobs = []
+            if logprobs.top_logprobs:
+                # - Non-streaming (use_token_index=True): uses token_idx for full data
+                # - Streaming (use_token_index=False): uses index 0 for pre-sliced data
+                top_logprobs_idx = token_idx if use_token_index else 0
+                for top_token, top_logprob in logprobs.top_logprobs[
+                    top_logprobs_idx
+                ].items():
+                    top_token_bytes = list(top_token.encode("utf-8"))
+                    top_logprobs.append(
+                        TopLogprob(
+                            token=top_token,
+                            bytes=top_token_bytes,
+                            logprob=top_logprob,
+                        )
+                    )
+            token_logprobs.append(
+                ChatCompletionTokenLogprob(
+                    token=token,
+                    bytes=token_bytes,
+                    logprob=logprob,
+                    top_logprobs=top_logprobs,
+                )
+            )
+
+        return token_logprobs
+
+    def _process_response_logprobs(self, ret_item: Dict[str, Any]) -> ChoiceLogprobs:
+        """Process logprobs for non-streaming response"""
+        logprobs = to_openai_style_logprobs(
+            output_token_logprobs=ret_item["meta_info"]["output_token_logprobs"],
+            output_top_logprobs=ret_item["meta_info"].get("output_top_logprobs", None),
+        )
+
+        token_logprobs = self._process_logprobs_tokens(logprobs, use_token_index=True)
+        return ChoiceLogprobs(content=token_logprobs)
+
+    def _process_tool_calls(
+        self,
+        text: str,
+        tools: List[Any],
+        tool_call_parser: Optional[str],
+        finish_reason: Dict[str, Any],
+    ) -> tuple[Optional[List[ToolCall]], str, Dict[str, Any]]:
+        """Process tool calls in the response"""
+        parser = FunctionCallParser(tools, tool_call_parser)
+        if parser.has_tool_call(text):
+            if finish_reason["type"] == "stop":
+                finish_reason["type"] = "tool_calls"
+                finish_reason["matched"] = None
+            try:
+                text, call_info_list = parser.parse_non_stream(text)
+                tool_calls = [
+                    ToolCall(
+                        id=f"call_{uuid.uuid4().hex[:24]}",
+                        function=FunctionResponse(
+                            name=call_info.name, arguments=call_info.parameters
+                        ),
+                    )
+                    for call_info in call_info_list
+                ]
+                return tool_calls, text, finish_reason
+            except Exception as e:
+                logger.error(f"Tool call parsing error: {e}")
+                # Return error but don't fail the whole request
+                return None, text, finish_reason
+
+        return None, text, finish_reason
+
+    def _process_streaming_logprobs(
+        self, content: Dict[str, Any], n_prev_token: int
+    ) -> ChoiceLogprobs:
+        """Process logprobs for streaming response"""
+        logprobs = to_openai_style_logprobs(
+            output_token_logprobs=content["meta_info"]["output_token_logprobs"][
+                n_prev_token:
+            ],
+            output_top_logprobs=content["meta_info"].get("output_top_logprobs", [])[
+                n_prev_token:
+            ],
+        )
+
+        token_logprobs = self._process_logprobs_tokens(logprobs, use_token_index=False)
+        return ChoiceLogprobs(content=token_logprobs)
+
+    def _process_reasoning_stream(
+        self,
+        index: int,
+        delta: str,
+        reasoning_parser_dict: Dict[int, ReasoningParser],
+        content: Dict[str, Any],
+        request: ChatCompletionRequest,
+    ) -> tuple[Optional[str], str]:
+        """Process reasoning content in streaming response"""
+        if index not in reasoning_parser_dict:
+            reasoning_parser_dict[index] = ReasoningParser(
+                self.tokenizer_manager.server_args.reasoning_parser,
+                request.stream_reasoning,
+            )
+        reasoning_parser = reasoning_parser_dict[index]
+        return reasoning_parser.parse_stream_chunk(delta)
+
+    def _get_enable_thinking_from_request(request: ChatCompletionRequest) -> bool:
+        """Extracts the 'enable_thinking' flag from request chat_template_kwargs.
+
+        NOTE: This parameter is only useful for models that support the
+        enable_thinking flag, such as Qwen3.
+
+        Args:
+            request: The request object (or an item from a list of requests).
+        Returns:
+            The boolean value of 'enable_thinking' if found, otherwise True.
+        """
+        if (
+            hasattr(request, "chat_template_kwargs")
+            and request.chat_template_kwargs
+            and request.chat_template_kwargs.get("enable_thinking") is not None
+        ):
+            return request.chat_template_kwargs.get("enable_thinking")
+        return True
+
+    async def _process_tool_call_stream(
+        self,
+        index: int,
+        delta: str,
+        parser_dict: Dict[int, FunctionCallParser],
+        content: Dict[str, Any],
+        request: ChatCompletionRequest,
+        finish_reason_type: Optional[str],
+    ):
+        """Process tool calls in streaming response"""
+        if index not in parser_dict:
+            parser_dict[index] = FunctionCallParser(
+                tools=request.tools,
+                tool_call_parser=self.tokenizer_manager.server_args.tool_call_parser,
+            )
+        parser = parser_dict[index]
+
+        normal_text, calls = parser.parse_stream_chunk(delta)
+
+        # Yield normal text
+        if normal_text:
+            choice_data = ChatCompletionResponseStreamChoice(
+                index=index,
+                delta=DeltaMessage(content=normal_text),
+                finish_reason=finish_reason_type,
+            )
+            chunk = ChatCompletionStreamResponse(
+                id=content["meta_info"]["id"],
+                created=int(time.time()),
+                choices=[choice_data],
+                model=request.model,
+            )
+            yield f"data: {chunk.model_dump_json()}\n\n"
+
+        # Yield tool calls
+        for call_item in calls:
+            # Tool call ID should be generated only once per tool call
+            if call_item.name:
+                # First chunk: include ID and function name
+                tool_call_id = f"call_{uuid.uuid4().hex[:24]}"
+                function_name = call_item.name
+            else:
+                # Subsequent chunks: null ID and name for argument deltas
+                tool_call_id = None
+                function_name = None
+
+            if finish_reason_type == "stop":
+                # Handle remaining arguments
+                latest_delta_len = 0
+                if isinstance(call_item.parameters, str):
+                    latest_delta_len = len(call_item.parameters)
+
+                expected_call = json.dumps(
+                    parser.detector.prev_tool_call_arr[index].get("arguments", {}),
+                    ensure_ascii=False,
+                )
+                actual_call = parser.detector.streamed_args_for_tool[index]
+                if latest_delta_len > 0:
+                    actual_call = actual_call[:-latest_delta_len]
+                remaining_call = expected_call.replace(actual_call, "", 1)
+                call_item.parameters = remaining_call
+                finish_reason_type = "tool_calls"
+
+            tool_call = ToolCall(
+                id=tool_call_id,
+                index=call_item.tool_index,
+                function=FunctionResponse(
+                    name=function_name,
+                    arguments=call_item.parameters,
+                ),
+            )
+
+            choice_data = ChatCompletionResponseStreamChoice(
+                index=index,
+                delta=DeltaMessage(tool_calls=[tool_call]),
+                finish_reason=(
+                    None
+                    if request.stream_options and request.stream_options.include_usage
+                    else finish_reason_type
+                ),
+            )
+            chunk = ChatCompletionStreamResponse(
+                id=content["meta_info"]["id"],
+                created=int(time.time()),
+                choices=[choice_data],
+                model=request.model,
+            )
+            yield f"data: {chunk.model_dump_json()}\n\n"
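
For the streaming path (_generate_chat_stream together with _process_tool_call_stream above), a minimal client-side sketch looks like the following. The server address, model name, and tool schema are placeholders, and it assumes the server was launched with a --tool-call-parser that matches the model. Note that when stream_options.include_usage is set, the final usage chunk is emitted with an empty choices list, so the client guards against that.

# Minimal streaming sketch (not part of the diff). Placeholders: server URL,
# model name, and the get_weather tool schema. Assumes the server was started
# with a matching --tool-call-parser so tool-call deltas are emitted.
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY")

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

stream = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=tools,
    stream=True,
    stream_options={"include_usage": True},
)

for chunk in stream:
    if not chunk.choices:  # the trailing usage-only chunk has an empty choices list
        print("usage:", chunk.usage)
        continue
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)
    if delta.tool_calls:
        for tc in delta.tool_calls:
            # The first chunk of a call carries id + function.name; later chunks
            # stream function.arguments deltas (see _process_tool_call_stream).
            print(tc.id, tc.function.name, tc.function.arguments)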