coze-coding-utils 0.2.1__py3-none-any.whl → 0.2.2a1__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Files changed (36)
  1. coze_coding_utils/__init__.py +1 -1
  2. coze_coding_utils/error/__init__.py +31 -0
  3. coze_coding_utils/error/classifier.py +320 -0
  4. coze_coding_utils/error/codes.py +356 -0
  5. coze_coding_utils/error/exceptions.py +439 -0
  6. coze_coding_utils/error/patterns.py +939 -0
  7. coze_coding_utils/error/test_classifier.py +0 -0
  8. coze_coding_utils/file/__init__.py +0 -0
  9. coze_coding_utils/file/file.py +327 -0
  10. coze_coding_utils/helper/__init__.py +0 -0
  11. coze_coding_utils/helper/agent_helper.py +599 -0
  12. coze_coding_utils/helper/graph_helper.py +231 -0
  13. coze_coding_utils/log/__init__.py +0 -0
  14. coze_coding_utils/log/common.py +8 -0
  15. coze_coding_utils/log/config.py +10 -0
  16. coze_coding_utils/log/err_trace.py +88 -0
  17. coze_coding_utils/log/loop_trace.py +72 -0
  18. coze_coding_utils/log/node_log.py +487 -0
  19. coze_coding_utils/log/parser.py +255 -0
  20. coze_coding_utils/log/write_log.py +183 -0
  21. coze_coding_utils/messages/__init__.py +0 -0
  22. coze_coding_utils/messages/client.py +48 -0
  23. coze_coding_utils/messages/server.py +173 -0
  24. coze_coding_utils/openai/__init__.py +5 -0
  25. coze_coding_utils/openai/converter/__init__.py +6 -0
  26. coze_coding_utils/openai/converter/request_converter.py +165 -0
  27. coze_coding_utils/openai/converter/response_converter.py +467 -0
  28. coze_coding_utils/openai/handler.py +298 -0
  29. coze_coding_utils/openai/types/__init__.py +37 -0
  30. coze_coding_utils/openai/types/request.py +24 -0
  31. coze_coding_utils/openai/types/response.py +178 -0
  32. {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/METADATA +2 -2
  33. coze_coding_utils-0.2.2a1.dist-info/RECORD +37 -0
  34. coze_coding_utils-0.2.1.dist-info/RECORD +0 -7
  35. {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/WHEEL +0 -0
  36. {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/licenses/LICENSE +0 -0
coze_coding_utils/openai/converter/request_converter.py
@@ -0,0 +1,165 @@
+ """OpenAI request converter: OpenAI Request → LangGraph Input"""
+
+ from typing import Dict, Any, List
+ from coze_coding_utils.openai.types.request import (
+     ChatCompletionRequest,
+     ChatMessage,
+ )
+ from coze_coding_utils.file.file import File, FileOps, infer_file_category
+
+
+ class RequestConverter:
+     """Convert an OpenAI request into LangGraph input"""
+
+     @staticmethod
+     def parse(payload: Dict[str, Any]) -> ChatCompletionRequest:
+         """Parse a JSON payload into a ChatCompletionRequest"""
+         messages_raw = payload.get("messages", [])
+         messages: List[ChatMessage] = []
+
+         for msg_data in messages_raw:
+             messages.append(ChatMessage(
+                 role=msg_data.get("role", "user"),
+                 content=msg_data.get("content"),
+                 tool_calls=msg_data.get("tool_calls"),
+                 tool_call_id=msg_data.get("tool_call_id"),
+             ))
+
+         return ChatCompletionRequest(
+             messages=messages,
+             model=payload.get("model", "default"),
+             stream=payload.get("stream", False),
+             session_id=payload.get("session_id", ""),
+             temperature=payload.get("temperature"),
+             max_tokens=payload.get("max_tokens"),
+         )
+
+     @staticmethod
+     def get_session_id(request: ChatCompletionRequest) -> str:
+         """Extract the session_id"""
+         return request.session_id
+
+     @staticmethod
+     def to_stream_input(request: ChatCompletionRequest) -> Dict[str, Any]:
+         """
+         Convert to the LangGraph stream input format
+
+         Only the last user message is processed; history is managed by session_id + the checkpointer
+         """
+         # Find the last user message
+         last_user_msg: ChatMessage | None = None
+         for msg in reversed(request.messages):
+             if msg.role == "user":
+                 last_user_msg = msg
+                 break
+
+         if last_user_msg is None:
+             return {"messages": []}
+
+         content_parts = RequestConverter._convert_content(last_user_msg.content)
+         return {"messages": [{"role": "user", "content": content_parts}]}
+
+     @staticmethod
+     def _convert_content(content: Any) -> List[Dict[str, Any]]:
+         """
+         Convert message content into the LangGraph format
+
+         Supports:
+         - strings: converted directly to a text part
+         - lists: multimodal content (text, image_url, video_url, audio_url, file_url)
+         """
+         if content is None:
+             return []
+
+         # String content
+         if isinstance(content, str):
+             return [{"type": "text", "text": content}]
+
+         # List of multimodal content parts
+         if isinstance(content, list):
+             result: List[Dict[str, Any]] = []
+             for part in content:
+                 converted = RequestConverter._convert_content_part(part)
+                 result.extend(converted)
+             return result
+
+         return []
+
+     @staticmethod
+     def _convert_content_part(part: Dict[str, Any]) -> List[Dict[str, Any]]:
+         """Convert a single content part"""
+         part_type = part.get("type", "text")
+
+         if part_type == "text":
+             text = part.get("text", "")
+             if text:
+                 return [{"type": "text", "text": text}]
+             return []
+
+         if part_type == "image_url":
+             image_url_data = part.get("image_url", {})
+             url = image_url_data.get("url", "")
+             if url:
+                 return [
+                     {"type": "text", "text": url},
+                     {"type": "image_url", "image_url": {"url": url}},
+                 ]
+             return []
+
+         if part_type == "video_url":
+             video_url_data = part.get("video_url", {})
+             url = video_url_data.get("url", "")
+             if url:
+                 return [
+                     {"type": "text", "text": url},
+                     {"type": "video_url", "video_url": {"url": url}},
+                 ]
+             return []
+
+         if part_type == "audio_url":
+             audio_url_data = part.get("audio_url", {})
+             url = audio_url_data.get("url", "")
+             if url:
+                 return [{"type": "text", "text": f"audio url: {url}"}]
+             return []
+
+         if part_type == "file_url":
+             # Handle file URLs by extracting the file content
+             file_url_data = part.get("file_url", {})
+             url = file_url_data.get("url", "")
+             file_name = file_url_data.get("file_name", "")
+             if url:
+                 return RequestConverter._process_file_url(url, file_name)
+             return []
+
+         return []
+
+     @staticmethod
+     def _process_file_url(url: str, file_name: str = "") -> List[Dict[str, Any]]:
+         """Handle a file URL, dispatching on the inferred file type"""
+         try:
+             file_type, _ = infer_file_category(url)
+             file_data = File(url=url, file_type=file_type)
+
+             if file_type == "image":
+                 return [
+                     {"type": "text", "text": url},
+                     {"type": "image_url", "image_url": {"url": url}},
+                 ]
+             elif file_type == "video":
+                 return [
+                     {"type": "text", "text": url},
+                     {"type": "video_url", "video_url": {"url": url}},
+                 ]
+             elif file_type == "audio":
+                 return [{"type": "text", "text": f"audio url: {url}"}]
+             else:
+                 # Other file types: try to extract the text content
+                 file_content = FileOps.extract_text(file_data)
+                 return [{
+                     "type": "text",
+                     "text": f"file name: {file_name}, url: {url}\n\nFile Content:\n{file_content}",
+                 }]
+         except Exception:
+             # If file processing fails, fall back to returning the URL as text
+             return [{"type": "text", "text": f"file url: {url}"}]
coze_coding_utils/openai/converter/response_converter.py
@@ -0,0 +1,467 @@
+ """OpenAI response converter: LangGraph Stream → OpenAI Response"""
+
+ import json
+ import time
+ from typing import Iterator, Optional, List, Dict, Any
+
+ from coze_coding_utils.openai.types.response import (
+     ChatCompletionChunk,
+     ChatCompletionResponse,
+     ChunkChoice,
+     Delta,
+     ToolCallChunk,
+     ToolCallFunction,
+     Choice,
+     Message,
+     Usage,
+ )
+
+
+ class ResponseConverter:
+     """Convert LangGraph messages into OpenAI responses"""
+
+     def __init__(self, request_id: str, model: str = "default"):
+         self.request_id = request_id
+         self.model = model
+         self.created = int(time.time())
+         self._sent_role = False  # whether the assistant role has been sent
+         # Streaming state for tool calls
+         self._current_tool_calls: Dict[int, Dict[str, Any]] = {}  # index -> {id, name, args}
+
+     def _create_chunk(
+         self,
+         delta: Delta,
+         finish_reason: Optional[str] = None,
+     ) -> ChatCompletionChunk:
+         """Create a streaming response chunk"""
+         return ChatCompletionChunk(
+             id=self.request_id,
+             object="chat.completion.chunk",
+             created=self.created,
+             model=self.model,
+             choices=[
+                 ChunkChoice(
+                     index=0,
+                     delta=delta,
+                     finish_reason=finish_reason,
+                 )
+             ],
+         )
+
+     def iter_langgraph_stream(
+         self, items: Iterator[Any]
+     ) -> Iterator[str]:
+         """
+         Process the raw LangGraph stream directly, emitting tool-call arguments incrementally
+
+         Args:
+             items: the iterator returned by graph.stream(stream_mode="messages");
+                 each item is a (chunk, metadata) tuple
+
+         Yields:
+             SSE-formatted strings
+         """
+         # Track whether a finish_reason (tool_calls or stop) has been sent
+         sent_finish_reason = False
+
+         for item in items:
+             chunk, meta = item
+             chunk_type = chunk.__class__.__name__
+
+             # Filter out messages from the tools node
+             if (meta or {}).get("langgraph_node") == "tools":
+                 # ToolMessage still needs handling, though
+                 if chunk_type != "ToolMessage":
+                     continue
+
+             # Before processing, note whether tool calls were in flight (to tell whether a tool_calls finish_reason will be sent)
+             had_tool_calls_before = bool(self._current_tool_calls)
+
+             for sse_chunk in self._process_langgraph_chunk(chunk, meta):
+                 yield sse_chunk
+
+             # Check whether a tool_calls finish_reason was sent during processing
+             is_last = (meta or {}).get("chunk_position") == "last"
+             if chunk_type == "AIMessageChunk" and is_last and had_tool_calls_before:
+                 # A tool_calls finish_reason was sent during processing; record it
+                 sent_finish_reason = True
+             elif chunk_type == "ToolMessage":
+                 # An assistant message will follow the ToolMessage; reset the flag
+                 sent_finish_reason = False
+
+         # At end of stream, send stop if a role was sent but no finish_reason was
+         if self._sent_role and not sent_finish_reason:
+             yield self._chunk_to_sse(self._create_chunk(Delta(), finish_reason="stop"))
+
+         yield "data: [DONE]\n\n"
+
+     def _process_langgraph_chunk(
+         self, chunk: Any, meta: Dict[str, Any]
+     ) -> Iterator[str]:
+         """Process a single LangGraph chunk"""
+         chunk_type = chunk.__class__.__name__
+         is_last = (meta or {}).get("chunk_position") == "last"
+
+         if chunk_type == "AIMessageChunk":
+             yield from self._process_ai_message_chunk(chunk, meta, is_last)
+         elif chunk_type == "AIMessage":
+             yield from self._process_ai_message(chunk)
+         elif chunk_type == "ToolMessage":
+             yield from self._process_tool_message(chunk, meta, is_last)
+
+     def _process_ai_message_chunk(
+         self, chunk: Any, meta: Dict[str, Any], is_last: bool
+     ) -> Iterator[str]:
+         """Process an AIMessageChunk - supports incremental text and tool calls"""
+         # Handle the text content
+         text = getattr(chunk, "content", "")
+         if text:
+             # Send the role first (if not already sent)
+             if not self._sent_role:
+                 self._sent_role = True
+                 yield self._chunk_to_sse(self._create_chunk(Delta(role="assistant")))
+             yield self._chunk_to_sse(self._create_chunk(Delta(content=text)))
+
+         # Handle tool-call deltas
+         tool_call_chunks = getattr(chunk, "tool_call_chunks", None)
+         if tool_call_chunks:
+             # Send the role first (if not already sent)
+             if not self._sent_role:
+                 self._sent_role = True
+                 yield self._chunk_to_sse(self._create_chunk(Delta(role="assistant")))
+
+             for tc_chunk in tool_call_chunks:
+                 yield from self._process_tool_call_chunk(tc_chunk)
+
+         # Check whether the tool calls have finished
+         finish_reason = None
+         try:
+             resp_meta = getattr(chunk, "response_metadata", {})
+             if resp_meta and isinstance(resp_meta, dict):
+                 finish_reason = resp_meta.get("finish_reason")
+         except Exception:
+             pass
+
+         if finish_reason == "tool_calls" or (is_last and self._current_tool_calls):
+             # Tool calls finished; send the finish_reason
+             yield self._chunk_to_sse(self._create_chunk(Delta(), finish_reason="tool_calls"))
+             self._current_tool_calls = {}
+             self._sent_role = False
+
+     def _process_tool_call_chunk(self, tc_chunk: Any) -> Iterator[str]:
+         """Process a single tool-call delta - stream the arguments out"""
+         # Read the chunk's attributes
+         if isinstance(tc_chunk, dict):
+             index = tc_chunk.get("index", 0)
+             tc_id = tc_chunk.get("id")
+             tc_name = tc_chunk.get("name")
+             tc_args = tc_chunk.get("args")
+         else:
+             index = getattr(tc_chunk, "index", 0)
+             tc_id = getattr(tc_chunk, "id", None)
+             tc_name = getattr(tc_chunk, "name", None)
+             tc_args = getattr(tc_chunk, "args", None)
+
+         if index is None:
+             return
+
+         # Normalize to strings
+         tc_id_str = self._normalize_to_string(tc_id)
+         tc_name_str = self._normalize_to_string(tc_name)
+         tc_args_str = self._normalize_to_string(tc_args)
+
+         # Determine whether this is a new tool call
+         is_new_tool_call = index not in self._current_tool_calls
+
+         if is_new_tool_call:
+             # New tool call: record it and send its id and name
+             self._current_tool_calls[index] = {
+                 "id": tc_id_str,
+                 "name": tc_name_str,
+                 "args": tc_args_str,
+             }
+             # Send the initial chunk (carrying the id and name)
+             tool_call = ToolCallChunk(
+                 index=index,
+                 id=tc_id_str if tc_id_str else None,
+                 type="function",
+                 function=ToolCallFunction(
+                     name=tc_name_str,
+                     arguments=tc_args_str,
+                 ),
+             )
+             yield self._chunk_to_sse(self._create_chunk(Delta(tool_calls=[tool_call])))
+         else:
+             # Existing tool call: accumulate and send the delta
+             existing = self._current_tool_calls[index]
+
+             # Accumulate the id (usually present only in the first chunk)
+             if tc_id_str:
+                 existing["id"] += tc_id_str
+
+             # Accumulate the name (usually present only in the first few chunks)
+             if tc_name_str:
+                 existing["name"] += tc_name_str
+
+             # Accumulate the args (the arguments are the main streamed payload)
+             if tc_args_str:
+                 existing["args"] += tc_args_str
+                 # Send the argument delta
+                 tool_call = ToolCallChunk(
+                     index=index,
+                     id=None,  # subsequent chunks don't need the id
+                     type="function",
+                     function=ToolCallFunction(
+                         name="",  # subsequent chunks don't need the name
+                         arguments=tc_args_str,  # send only the delta
+                     ),
+                 )
+                 yield self._chunk_to_sse(self._create_chunk(Delta(tool_calls=[tool_call])))
+
+     def _process_ai_message(self, chunk: Any) -> Iterator[str]:
+         """Process a complete AIMessage"""
+         text = getattr(chunk, "content", "")
+         if text:
+             if not self._sent_role:
+                 self._sent_role = True
+                 yield self._chunk_to_sse(self._create_chunk(Delta(role="assistant")))
+             yield self._chunk_to_sse(self._create_chunk(Delta(content=text)))
+
+     def _process_tool_message(
+         self, chunk: Any, meta: Dict[str, Any], is_last: bool
+     ) -> Iterator[str]:
+         """Process a ToolMessage - the tool's execution result"""
+         is_streaming = (meta or {}).get("chunk_position") is not None
+
+         # Send only once complete (non-streaming, or the last chunk)
+         if not is_streaming or is_last:
+             tool_call_id = getattr(chunk, "tool_call_id", "") or ""
+             result = getattr(chunk, "content", "") or ""
+
+             # Send the tool message content
+             yield self._chunk_to_sse(
+                 self._create_chunk(
+                     Delta(
+                         role="tool",
+                         tool_call_id=tool_call_id,
+                         content=str(result),
+                     )
+                 )
+             )
+             # Send the finish chunk for the tool message
+             yield self._chunk_to_sse(
+                 self._create_chunk(Delta(), finish_reason="stop")
+             )
+
+     @staticmethod
+     def _normalize_to_string(value: Any) -> str:
+         """Normalize a value to a string"""
+         if value is None:
+             return ""
+         if isinstance(value, list):
+             return "".join(str(x) for x in value)
+         return str(value)
+
+     def _chunk_to_sse(self, chunk: ChatCompletionChunk) -> str:
+         """Serialize a chunk to SSE format"""
+         return f"data: {json.dumps(chunk.to_dict(), ensure_ascii=False)}\n\n"
+
+     def collect_langgraph_to_response(
+         self, items: Iterator[Any]
+     ) -> ChatCompletionResponse:
+         """
+         Collect messages from the raw LangGraph stream and return a non-streaming response
+
+         Args:
+             items: the iterator returned by graph.stream(stream_mode="messages")
+
+         Note:
+             The non-streaming response emits all messages - assistant, tool_calls,
+             and tool responses - into the choices array in message order
+         """
+         # Collect all messages, stored in order
+         all_messages: List[Dict[str, Any]] = []
+
+         # Accumulation state for the current assistant message
+         current_content_parts: List[str] = []
+         current_tool_calls: List[Dict[str, Any]] = []
+         accumulated_tool_calls: Dict[int, Dict[str, Any]] = {}
+         has_assistant_content = False
+
+         def _flush_assistant_message():
+             """Write the accumulated assistant message into all_messages"""
+             nonlocal current_content_parts, current_tool_calls, accumulated_tool_calls, has_assistant_content
+
+             # First materialize the accumulated tool-call deltas
+             if accumulated_tool_calls and not current_tool_calls:
+                 for index in sorted(accumulated_tool_calls.keys()):
+                     tc_data = accumulated_tool_calls[index]
+                     current_tool_calls.append({
+                         "id": tc_data["id"],
+                         "type": "function",
+                         "function": {
+                             "name": tc_data["name"],
+                             "arguments": tc_data["args"],
+                         },
+                     })
+
+             # Write only if there is content or a tool call
+             if current_content_parts or current_tool_calls:
+                 content = "".join(current_content_parts) if current_content_parts else None
+                 finish_reason = "tool_calls" if current_tool_calls else "stop"
+                 all_messages.append({
+                     "role": "assistant",
+                     "content": content,
+                     "tool_calls": current_tool_calls if current_tool_calls else None,
+                     "finish_reason": finish_reason,
+                 })
+
+             # Reset the state
+             current_content_parts = []
+             current_tool_calls = []
+             accumulated_tool_calls = {}
+             has_assistant_content = False
+
+         for item in items:
+             chunk, meta = item
+             chunk_type = chunk.__class__.__name__
+
+             # Filter out internal AI messages from the tools node
+             if (meta or {}).get("langgraph_node") == "tools":
+                 if chunk_type != "ToolMessage":
+                     continue
+
+             if chunk_type in ("AIMessageChunk", "AIMessage"):
+                 # Collect text content
+                 text = getattr(chunk, "content", "")
+                 if text:
+                     current_content_parts.append(str(text))
+                     has_assistant_content = True
+
+                 # Collect tool-call deltas
+                 tc_chunks = getattr(chunk, "tool_call_chunks", None)
+                 if tc_chunks:
+                     for tc in tc_chunks:
+                         if isinstance(tc, dict):
+                             index = tc.get("index", 0)
+                             tc_id = tc.get("id")
+                             tc_name = tc.get("name")
+                             tc_args = tc.get("args")
+                         else:
+                             index = getattr(tc, "index", 0)
+                             tc_id = getattr(tc, "id", None)
+                             tc_name = getattr(tc, "name", None)
+                             tc_args = getattr(tc, "args", None)
+
+                         if index is None:
+                             continue
+
+                         tc_id_str = self._normalize_to_string(tc_id)
+                         tc_name_str = self._normalize_to_string(tc_name)
+                         tc_args_str = self._normalize_to_string(tc_args)
+
+                         if index not in accumulated_tool_calls:
+                             accumulated_tool_calls[index] = {
+                                 "id": tc_id_str,
+                                 "name": tc_name_str,
+                                 "args": tc_args_str,
+                             }
+                         else:
+                             accumulated_tool_calls[index]["id"] += tc_id_str
+                             accumulated_tool_calls[index]["name"] += tc_name_str
+                             accumulated_tool_calls[index]["args"] += tc_args_str
+
+                 # Check for complete tool_calls (on a full AIMessage)
+                 full_tool_calls = getattr(chunk, "tool_calls", None)
+                 if full_tool_calls and chunk_type == "AIMessage":
+                     for tc in full_tool_calls:
+                         tc_id = tc.get("id") if isinstance(tc, dict) else getattr(tc, "id", "")
+                         tc_name = tc.get("name") if isinstance(tc, dict) else getattr(tc, "name", "")
+                         tc_args = tc.get("args") if isinstance(tc, dict) else getattr(tc, "args", {})
+
+                         if isinstance(tc_args, str):
+                             args_str = tc_args
+                         else:
+                             args_str = json.dumps(tc_args, ensure_ascii=False)
+
+                         current_tool_calls.append({
+                             "id": tc_id,
+                             "type": "function",
+                             "function": {
+                                 "name": tc_name,
+                                 "arguments": args_str,
+                             },
+                         })
+
+             elif chunk_type == "ToolMessage":
+                 # A ToolMessage arrived: flush the pending assistant message first
+                 _flush_assistant_message()
+
+                 # Append the tool response message
+                 is_last = (meta or {}).get("chunk_position") == "last"
+                 is_streaming = (meta or {}).get("chunk_position") is not None
+
+                 # Append only once complete (non-streaming, or the last chunk)
+                 if not is_streaming or is_last:
+                     tool_call_id = getattr(chunk, "tool_call_id", "") or ""
+                     result = getattr(chunk, "content", "") or ""
+                     all_messages.append({
+                         "role": "tool",
+                         "tool_call_id": tool_call_id,
+                         "content": str(result),
+                         "finish_reason": "stop",
+                     })
+
+         # Finally, flush any remaining assistant message
+         _flush_assistant_message()
+
+         # Build the choices
+         choices: List[Choice] = []
+         for idx, msg in enumerate(all_messages):
+             if msg["role"] == "assistant":
+                 choices.append(
+                     Choice(
+                         index=idx,
+                         message=Message(
+                             role="assistant",
+                             content=msg.get("content"),
+                             tool_calls=msg.get("tool_calls"),
+                         ),
+                         finish_reason=msg.get("finish_reason", "stop"),
+                     )
+                 )
+             elif msg["role"] == "tool":
+                 choices.append(
+                     Choice(
+                         index=idx,
+                         message=Message(
+                             role="tool",
+                             tool_call_id=msg.get("tool_call_id"),
+                             content=msg.get("content"),
+                         ),
+                         finish_reason="stop",
+                     )
+                 )
+
+         # If there were no messages at all, return an empty assistant message
+         if not choices:
+             choices.append(
+                 Choice(
+                     index=0,
+                     message=Message(role="assistant", content=None),
+                     finish_reason="stop",
+                 )
+             )
+
+         return ChatCompletionResponse(
+             id=self.request_id,
+             object="chat.completion",
+             created=self.created,
+             model=self.model,
+             choices=choices,
+             usage=Usage(
+                 prompt_tokens=0,
+                 completion_tokens=0,
+                 total_tokens=0,
+             ),
+         )
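
And a matching sketch (editorial, not part of the diff) for the response side, driving iter_langgraph_stream with a hand-rolled stream. The AIMessageChunk stub and the "agent" node name below are assumptions standing in for the real LangChain/LangGraph objects; the converter only dispatches on the class name and the attributes it reads via getattr, so a stub is enough:

    from coze_coding_utils.openai.converter.response_converter import ResponseConverter

    class AIMessageChunk:
        """Stub carrying just the attributes ResponseConverter inspects."""
        def __init__(self, content: str):
            self.content = content
            self.tool_call_chunks = None
            self.response_metadata = {}

    converter = ResponseConverter(request_id="chatcmpl-demo", model="default")
    stream = iter([
        (AIMessageChunk("Hello"), {"langgraph_node": "agent"}),
        (AIMessageChunk(", world"), {"langgraph_node": "agent", "chunk_position": "last"}),
    ])

    for sse in converter.iter_langgraph_stream(stream):
        print(sse, end="")
    # Emits a role delta, two content deltas, a finish_reason="stop" chunk,
    # and finally "data: [DONE]".

collect_langgraph_to_response consumes the same (chunk, metadata) stream but buffers everything and returns a single ChatCompletionResponse, with usage hard-coded to zero tokens.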