coze-coding-utils 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. coze_coding_utils/__init__.py +1 -1
  2. {coze_coding_utils-0.2.0.dist-info → coze_coding_utils-0.2.1.dist-info}/METADATA +2 -17
  3. coze_coding_utils-0.2.1.dist-info/RECORD +7 -0
  4. coze_coding_utils/error/__init__.py +0 -31
  5. coze_coding_utils/error/classifier.py +0 -320
  6. coze_coding_utils/error/codes.py +0 -356
  7. coze_coding_utils/error/exceptions.py +0 -439
  8. coze_coding_utils/error/patterns.py +0 -939
  9. coze_coding_utils/error/test_classifier.py +0 -0
  10. coze_coding_utils/file/__init__.py +0 -0
  11. coze_coding_utils/file/file.py +0 -327
  12. coze_coding_utils/helper/__init__.py +0 -0
  13. coze_coding_utils/helper/agent_helper.py +0 -599
  14. coze_coding_utils/helper/graph_helper.py +0 -231
  15. coze_coding_utils/log/__init__.py +0 -0
  16. coze_coding_utils/log/common.py +0 -8
  17. coze_coding_utils/log/config.py +0 -10
  18. coze_coding_utils/log/err_trace.py +0 -88
  19. coze_coding_utils/log/loop_trace.py +0 -72
  20. coze_coding_utils/log/node_log.py +0 -487
  21. coze_coding_utils/log/parser.py +0 -255
  22. coze_coding_utils/log/write_log.py +0 -183
  23. coze_coding_utils/messages/__init__.py +0 -0
  24. coze_coding_utils/messages/client.py +0 -48
  25. coze_coding_utils/messages/server.py +0 -173
  26. coze_coding_utils/openai/__init__.py +0 -5
  27. coze_coding_utils/openai/converter/__init__.py +0 -6
  28. coze_coding_utils/openai/converter/request_converter.py +0 -165
  29. coze_coding_utils/openai/converter/response_converter.py +0 -467
  30. coze_coding_utils/openai/handler.py +0 -298
  31. coze_coding_utils/openai/types/__init__.py +0 -37
  32. coze_coding_utils/openai/types/request.py +0 -24
  33. coze_coding_utils/openai/types/response.py +0 -178
  34. coze_coding_utils-0.2.0.dist-info/RECORD +0 -37
  35. {coze_coding_utils-0.2.0.dist-info → coze_coding_utils-0.2.1.dist-info}/WHEEL +0 -0
  36. {coze_coding_utils-0.2.0.dist-info → coze_coding_utils-0.2.1.dist-info}/licenses/LICENSE +0 -0
coze_coding_utils/helper/agent_helper.py (deleted)
@@ -1,599 +0,0 @@
- import uuid
- import json
- import os
- from typing import Any, Dict, List, Tuple, Iterator
- import time
- from coze_coding_utils.file.file import File, FileOps, infer_file_category
- from coze_coding_utils.error import classify_error
-
- from coze_coding_utils.messages.client import (
-     ClientMessage,
-     ClientMessageContent,
-     QueryDetail,
-     PromptBlock,
-     PromptBlockContent,
-     UploadFileBlockDetail,
- )
- from coze_coding_utils.messages.server import (
-     ServerMessage,
-     ServerMessageContent,
-     ToolRequestDetail,
-     ToolResponseDetail,
-     MessageStartDetail,
-     MessageEndDetail,
-     TokenCost,
-     MESSAGE_TYPE_MESSAGE_START,
-     MESSAGE_TYPE_MESSAGE_END,
-     MESSAGE_END_CODE_SUCCESS,
-     MESSAGE_TYPE_ANSWER,
-     MESSAGE_TYPE_TOOL_REQUEST,
-     MESSAGE_TYPE_TOOL_RESPONSE,
- )
-
-
- def to_stream_input(msg: ClientMessage) -> Dict[str, Any]:
-     content_parts = []
-     if msg and msg.content and msg.content.query and msg.content.query.prompt:
-         for block in msg.content.query.prompt:
-             if block.type == "text" and block.content and block.content.text:
-                 content_parts.append({"type": "text", "text": block.content.text})
-             elif (
-                 block.type == "upload_file"
-                 and block.content
-                 and block.content.upload_file
-             ):
-                 file_info = block.content.upload_file
-                 file_type, _ = infer_file_category(file_info.url)
-                 file_data = File(url=file_info.url, file_type=file_type)
-                 # Images: pass the URL through as text and as an image_url part
-                 if file_data.file_type == "image":
-                     content_parts.append(
-                         {
-                             "type": "text",
-                             "text": f'{file_data.url}'
-                         }
-                     )
-                     content_parts.append(
-                         {
-                             "type": "image_url",
-                             "image_url": {"url": file_info.url},
-                         }
-                     )
-                 # Videos: pass the URL through as text and as a video_url part
-                 elif file_data.file_type == "video":
-                     content_parts.append(
-                         {
-                             "type": "text",
-                             "text": f'{file_data.url}'
-                         }
-                     )
-                     content_parts.append(
-                         {
-                             "type": "video_url",
-                             "video_url": {"url": file_info.url},
-                         }
-                     )
-                 # Audio: pass the URL through as text only
-                 elif file_data.file_type == "audio":
-                     content_parts.append(
-                         {
-                             "type": "text",
-                             "text": f"audio url: {file_info.url}",
-                         }
-                     )
-                 # Other files: extract the text content and inline it
-                 else:
-                     file_content = FileOps.extract_text(file_data)
-                     content_parts.append(
-                         {
-                             "type": "text",
-                             "text": f"file name:{file_info.file_name}, url: {file_info.url}\n\nFile Content:\n{file_content}",
-                         }
-                     )
-
-     return {"messages": [{"role": "user", "content": content_parts}]}
-
-
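For readers auditing what 0.2.1 removes: `to_stream_input` flattened a `ClientMessage` prompt into a chat-completions style payload. A minimal sketch against 0.2.0 (the keyword arguments mirror how the deleted `to_client_message` below constructs these types; anything beyond that is an assumption):

    from coze_coding_utils.helper.agent_helper import to_stream_input
    from coze_coding_utils.messages.client import (
        ClientMessage, ClientMessageContent, QueryDetail,
        PromptBlock, PromptBlockContent,
    )

    msg = ClientMessage(
        type="query",
        project_id="p1",
        session_id="s1",
        local_msg_id="m1",
        content=ClientMessageContent(query=QueryDetail(prompt=[
            PromptBlock(type="text", content=PromptBlockContent(text="hello")),
        ])),
    )
    # Expected: {"messages": [{"role": "user",
    #            "content": [{"type": "text", "text": "hello"}]}]}
    print(to_stream_input(msg))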
- def to_client_message(d: Dict[str, Any]) -> Tuple[ClientMessage, str]:
-     prompt_list = d.get("content", {}).get("query", {}).get("prompt", [])
-     blocks: List[PromptBlock] = []
-     for b in prompt_list:
-         c = b.get("content") or {}
-         b_type = b.get("type", "text")
-
-         if b_type == "text":
-             text = c.get("text") if isinstance(c, dict) else None
-             blocks.append(
-                 PromptBlock(
-                     type=b_type, content=PromptBlockContent(text=text)
-                 )
-             )
-         elif b_type == "upload_file":
-             upload_file_data = c.get("upload_file") if isinstance(c, dict) else None
-             upload_file = None
-             if upload_file_data:
-                 upload_file = UploadFileBlockDetail(
-                     file_name=upload_file_data.get("file_name", ""),
-                     file_path=upload_file_data.get("file_path", ""),
-                     url=upload_file_data.get("url", "")
-                 )
-             blocks.append(
-                 PromptBlock(
-                     type=b_type, content=PromptBlockContent(upload_file=upload_file)
-                 )
-             )
-
-     return ClientMessage(
-         type=d.get("type", "query"),
-         project_id=d.get("project_id", ""),
-         session_id=d.get("session_id", ""),
-         local_msg_id=d.get("local_msg_id", ""),
-         content=ClientMessageContent(query=QueryDetail(prompt=blocks)),
-     ), d.get("session_id", "")
-
-
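`to_client_message` went the other way, lifting a raw wire dict into a typed `ClientMessage` and returning the session id alongside it. A sketch under the same 0.2.0 assumptions:

    from coze_coding_utils.helper.agent_helper import to_client_message

    raw = {
        "type": "query",
        "project_id": "p1",
        "session_id": "s1",
        "local_msg_id": "m1",
        "content": {"query": {"prompt": [
            {"type": "text", "content": {"text": "hello"}},
        ]}},
    }
    msg, session_id = to_client_message(raw)
    assert session_id == "s1"
    assert msg.content.query.prompt[0].content.text == "hello"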
- def _merge_tool_call_chunks(chunks: List[Any]) -> List[Dict[str, Any]]:
-     merged: Dict[int, Dict[str, Any]] = {}
-     for chunk in chunks:
-         # chunk can be dict or object
-         if isinstance(chunk, dict):
-             index = chunk.get("index")
-             c_id = chunk.get("id")
-             c_name = chunk.get("name")
-             c_args = chunk.get("args")
-         else:
-             index = getattr(chunk, "index", None)
-             c_id = getattr(chunk, "id", None)
-             c_name = getattr(chunk, "name", None)
-             c_args = getattr(chunk, "args", None)
-
-         if index is None:
-             continue
-
-         # Normalize to string to avoid type errors during merge
-         c_id_str = "".join(str(x) for x in c_id) if isinstance(c_id, list) else (c_id or "")
-         c_name_str = "".join(str(x) for x in c_name) if isinstance(c_name, list) else (c_name or "")
-         c_args_str = "".join(str(x) for x in c_args) if isinstance(c_args, list) else (c_args or "")
-
-         if index not in merged:
-             merged[index] = {
-                 "index": index,
-                 "id": c_id_str,
-                 "name": c_name_str,
-                 "args": c_args_str,
-                 "type": "tool_call",
-             }
-         else:
-             merged[index]["id"] += c_id_str
-             merged[index]["name"] += c_name_str
-             merged[index]["args"] += c_args_str
-
-     return list(merged.values())
-
-
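Streaming tool calls arrive as partial chunks keyed by `index`, and this private helper concatenates the string fields per index. A deterministic illustration with dict-shaped chunks (the shape LangChain's `tool_call_chunks` uses):

    from coze_coding_utils.helper.agent_helper import _merge_tool_call_chunks

    chunks = [
        {"index": 0, "id": "call_1", "name": "search", "args": '{"q": "co'},
        {"index": 0, "id": "", "name": "", "args": 'ze"}'},
    ]
    # -> [{"index": 0, "id": "call_1", "name": "search",
    #      "args": '{"q": "coze"}', "type": "tool_call"}]
    print(_merge_tool_call_chunks(chunks))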
- def _item_to_server_messages(
-     item: Tuple[Any, Dict[str, Any]],
-     *,
-     session_id: str,
-     query_msg_id: str,
-     reply_id: str,
-     sequence_id_start: int = 1,
-     log_id: str = "",
- ) -> List[ServerMessage]:
-     chunk, meta = item
-     messages: List[ServerMessage] = []
-
-     # Filter out messages from the "tools" node to prevent internal model outputs from leaking as answers
-     if (meta or {}).get("langgraph_node") == "tools":
-         return messages
-
-     def _make_message(
-         msg_type: str, content: ServerMessageContent, finish: bool, seq: int
-     ) -> ServerMessage:
-         return ServerMessage(
-             type=msg_type,
-             session_id=session_id,
-             query_msg_id=query_msg_id,
-             reply_id=reply_id,
-             msg_id=str(uuid.uuid4()),
-             sequence_id=seq,
-             finish=finish,
-             content=content,
-             log_id=log_id,
-         )
-
-     seq = sequence_id_start
-
-     # Answer chunks (AIMessageChunk)
-     if chunk.__class__.__name__ == "AIMessageChunk":
-         text = getattr(chunk, "content", "")
-
-         # Safely get finish_reason
-         finish_reason = None
-         try:
-             resp_meta = getattr(chunk, "response_metadata", {})
-             if resp_meta and isinstance(resp_meta, dict):
-                 finish_reason = resp_meta.get("finish_reason")
-         except Exception:
-             pass
-
-         # Determine whether this is the last chunk
-         is_last_chunk = (meta or {}).get("chunk_position") == "last"
-         is_finished = is_last_chunk or bool(finish_reason)
-
-         # Check whether this chunk involves tool calls
-         has_tool_calls = bool(getattr(chunk, "tool_call_chunks", None)) or (finish_reason == "tool_calls")
-
-         # Only emit an answer if there is text content OR a finish signal
-         if text or (is_finished and not has_tool_calls):
-             content = ServerMessageContent(answer=str(text) if text is not None else "")
-             messages.append(
-                 _make_message(MESSAGE_TYPE_ANSWER, content, bool(is_finished), seq)
-             )
-             seq += 1
-
-     # Final answer (AIMessage)
-     if chunk.__class__.__name__ == "AIMessage":
-         text = getattr(chunk, "content", "")
-         if text:
-             content = ServerMessageContent(answer=text)
-             messages.append(_make_message(MESSAGE_TYPE_ANSWER, content, True, seq))
-             seq += 1
-
-     # Tool request from a complete AIMessage.
-     # Note: AIMessageChunk tool calls are handled in _iter_body_to_server_messages
-     tool_calls = getattr(chunk, "tool_calls", None)
-     if tool_calls and chunk.__class__.__name__ != "AIMessageChunk":
-         items = tool_calls
-         for tc in items:
-             # Normalize parameters to a dict
-             raw_args = (
-                 tc.get("args") if isinstance(tc, dict) else getattr(tc, "args", {})
-             )
-             if isinstance(raw_args, str):
-                 try:
-                     parsed = json.loads(raw_args)
-                     parameters = parsed if isinstance(parsed, dict) else {}
-                 except Exception:
-                     parameters = {}
-             elif isinstance(raw_args, dict):
-                 parameters = raw_args
-             else:
-                 parameters = {}
-             tool_name = tc.get("name") if isinstance(tc, dict) else getattr(tc, "name", "")
-             tool_name_str: str = str(tool_name or "")
-             detail = ToolRequestDetail(
-                 tool_call_id=(
-                     tc.get("id") if isinstance(tc, dict) else getattr(tc, "id", "")
-                 )
-                 or "",
-                 tool_name=tool_name_str,
-                 parameters={tool_name_str: parameters},
-             )
-             content = ServerMessageContent(tool_request=detail)
-             messages.append(
-                 _make_message(MESSAGE_TYPE_TOOL_REQUEST, content, True, seq)
-             )
-             seq += 1
-
-     return messages
-
-
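`_item_to_server_messages` maps a single `(chunk, metadata)` pair from a LangGraph messages stream onto zero or more `ServerMessage`s; the chunk is duck-typed by class name, so LangChain message objects fit. A sketch assuming `langchain-core` is installed alongside 0.2.0:

    from langchain_core.messages import AIMessage
    from coze_coding_utils.helper.agent_helper import _item_to_server_messages

    item = (AIMessage(content="The answer is 42."), {"langgraph_node": "agent"})
    msgs = _item_to_server_messages(
        item,
        session_id="s1",
        query_msg_id="q1",
        reply_id="r1",
        log_id="",
    )
    # A non-empty AIMessage yields one finished "answer" message.
    print([m.type for m in msgs])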
- def _iter_body_to_server_messages(
-     items: Iterator[Tuple[Any, Dict[str, Any]]],
-     *,
-     session_id: str,
-     query_msg_id: str,
-     reply_id: str,
-     sequence_id_start: int = 1,
-     log_id: str = "",
- ) -> Iterator[ServerMessage]:
-     seq = sequence_id_start
-     # Stable msg_id mapping per logical message stream.
-     # Keys are derived from meta so the same msg_id is kept across chunks.
-     stable_ids: Dict[Tuple[str, Any], str] = {}
-
-     accumulated_tool_chunks: List[Any] = []
-     accumulated_tool_response_content: Dict[str, str] = {}
-
-     def _flush_tool_chunks(seq_num: int) -> Tuple[List[ServerMessage], int]:
-         nonlocal accumulated_tool_chunks
-         msgs: List[ServerMessage] = []
-         if not accumulated_tool_chunks:
-             return msgs, seq_num
-
-         merged_tcs = _merge_tool_call_chunks(accumulated_tool_chunks)
-         accumulated_tool_chunks = []
-         for tc in merged_tcs:
-             raw_args = tc.get("args", {})
-             if isinstance(raw_args, str):
-                 try:
-                     parsed = json.loads(raw_args)
-                     parameters = parsed if isinstance(parsed, dict) else {}
-                 except Exception:
-                     parameters = {}
-             elif isinstance(raw_args, dict):
-                 parameters = raw_args
-             else:
-                 parameters = {}
-             tool_call_id = tc.get("id", "")
-             tool_name = tc.get("name", "")
-
-             detail = ToolRequestDetail(
-                 tool_call_id=tool_call_id or "",
-                 tool_name=tool_name or "",
-                 parameters={tool_name: parameters},
-             )
-             content = ServerMessageContent(tool_request=detail)
-             msgs.append(
-                 ServerMessage(
-                     type=MESSAGE_TYPE_TOOL_REQUEST,
-                     session_id=session_id,
-                     query_msg_id=query_msg_id,
-                     reply_id=reply_id,
-                     msg_id=str(uuid.uuid4()),
-                     sequence_id=seq_num,
-                     finish=True,
-                     content=content,
-                     log_id=log_id,
-                 )
-             )
-             seq_num += 1
-         return msgs, seq_num
-
-     for item in items:
-         chunk, meta = item
-         chunk_type = chunk.__class__.__name__
-         is_last = (meta or {}).get("chunk_position") == "last"
-         is_streaming = (meta or {}).get("chunk_position") is not None
-
-         msgs_to_yield: List[ServerMessage] = []
-         flushed_msgs: List[ServerMessage] = []
-
-         # 0. Flush accumulated tool chunks when the stream moves on.
-         # tool_call_chunks normally arrive as a contiguous run of AIMessageChunks,
-         # so a ToolMessage, an AIMessageChunk without tool_call_chunks, or the
-         # last chunk of a stream all signal that the accumulated tool request is
-         # complete and safe to emit.
-         if chunk_type == "ToolMessage" and accumulated_tool_chunks:
-             f_msgs, seq = _flush_tool_chunks(seq)
-             flushed_msgs.extend(f_msgs)
-
-         # 1. Handle AIMessageChunk with tool_call_chunks (streaming tool request)
-         if chunk_type == "AIMessageChunk":
-             tc_chunks = getattr(chunk, "tool_call_chunks", None)
-             if tc_chunks:
-                 accumulated_tool_chunks.extend(tc_chunks)
-             # An AIMessageChunk with NO tool_call_chunks while chunks are
-             # accumulated implies the tool definition phase is likely over.
-             elif accumulated_tool_chunks:
-                 f_msgs, seq = _flush_tool_chunks(seq)
-                 flushed_msgs.extend(f_msgs)
-
-             # Flush if this is the last chunk
-             if is_last and accumulated_tool_chunks:
-                 f_msgs, seq = _flush_tool_chunks(seq)
-                 flushed_msgs.extend(f_msgs)
-
-         # 2. Handle ToolMessage (tool response)
-         elif chunk_type == "ToolMessage":
-             tcid = getattr(chunk, "tool_call_id", "") or ""
-             result = getattr(chunk, "content", "") or ""
-
-             full_result = None
-             should_emit = False
-
-             if not is_streaming:
-                 full_result = result
-                 should_emit = True
-             else:
-                 if tcid not in accumulated_tool_response_content:
-                     accumulated_tool_response_content[tcid] = ""
-                 accumulated_tool_response_content[tcid] += str(result)
-
-                 if is_last:
-                     full_result = accumulated_tool_response_content.pop(tcid)
-                     should_emit = True
-
-             if should_emit:
-                 detail = ToolResponseDetail(
-                     tool_call_id=tcid,
-                     code="0",
-                     message="",
-                     result=str(full_result),
-                 )
-                 content = ServerMessageContent(tool_response=detail)
-                 msgs_to_yield.append(
-                     ServerMessage(
-                         type=MESSAGE_TYPE_TOOL_RESPONSE,
-                         session_id=session_id,
-                         query_msg_id=query_msg_id,
-                         reply_id=reply_id,
-                         msg_id=str(uuid.uuid4()),
-                         sequence_id=seq,
-                         finish=True,
-                         content=content,
-                         log_id=log_id,
-                     )
-                 )
-                 seq += 1
-
-         # 3. Delegate everything else to _item_to_server_messages
-         if chunk_type != "ToolMessage":
-             inner_msgs = _item_to_server_messages(
-                 item,
-                 session_id=session_id,
-                 query_msg_id=query_msg_id,
-                 reply_id=reply_id,
-                 sequence_id_start=seq,
-                 log_id=log_id,
-             )
-             # Combine: flushed (previous) + inner (current)
-             final_msgs = flushed_msgs + inner_msgs
-             msgs_to_yield.extend(final_msgs)
-
-             if inner_msgs:
-                 seq = inner_msgs[-1].sequence_id + 1
-         else:
-             # For a ToolMessage, msgs_to_yield already holds the tool response
-             # from block 2 (or nothing, if it has not been emitted yet), and any
-             # tool requests flushed in block 0 must precede it:
-             # Tool Request -> Tool Response.
-             final_msgs = flushed_msgs + msgs_to_yield
-             msgs_to_yield = final_msgs
-
-         for m in msgs_to_yield:
-             # Derive a stable grouping base for this item
-             group_base = (
-                 meta.get("langgraph_checkpoint_ns")
-                 or meta.get("checkpoint_ns")
-                 or getattr(chunk, "id", None)
-                 or meta.get("run_id")
-                 or meta.get("langgraph_path")
-                 or meta.get("langgraph_step")
-             )
-
-             # Derive a grouping key per message type
-             key: Tuple[str, Any]
-             if m.type == MESSAGE_TYPE_TOOL_REQUEST and m.content.tool_request:
-                 tcid = m.content.tool_request.tool_call_id or None
-                 key = (MESSAGE_TYPE_TOOL_REQUEST, tcid or group_base)
-             elif m.type == MESSAGE_TYPE_TOOL_RESPONSE and m.content.tool_response:
-                 tcid = m.content.tool_response.tool_call_id or None
-                 key = (MESSAGE_TYPE_TOOL_RESPONSE, tcid or group_base)
-             elif m.type == MESSAGE_TYPE_ANSWER:
-                 # Prefer chunk.id so the same msg_id spans the entire answer stream
-                 key = (MESSAGE_TYPE_ANSWER, getattr(chunk, "id", None) or group_base)
-             else:
-                 key = (m.type, group_base)
-
-             if key not in stable_ids:
-                 stable_ids[key] = str(uuid.uuid4())
-             m.msg_id = stable_ids[key]
-
-             yield m
-
-
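The body iterator glues the pieces together: streamed tool-call chunks are buffered and flushed as one tool request, tool outputs become tool responses, and everything else is delegated per item. A sketch under the same `langchain-core` assumptions:

    from langchain_core.messages import AIMessage, ToolMessage
    from coze_coding_utils.helper.agent_helper import _iter_body_to_server_messages

    items = iter([
        (AIMessage(content="", tool_calls=[
            {"name": "search", "args": {"q": "coze"}, "id": "call_1"},
        ]), {}),
        (ToolMessage(content="result text", tool_call_id="call_1"), {}),
        (AIMessage(content="Done."), {}),
    ])
    for sm in _iter_body_to_server_messages(
        items, session_id="s1", query_msg_id="q1", reply_id="r1", log_id=""
    ):
        print(sm.sequence_id, sm.type)
    # Expected order: tool_request, tool_response, answer.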
- def iter_server_messages(
-     items: Iterator[Tuple[Any, Dict[str, Any]]],
-     *,
-     session_id: str,
-     query_msg_id: str,
-     local_msg_id: str,
-     run_id: str,
-     sequence_id_start: int = 1,
-     log_id: str,
- ) -> Iterator[ServerMessage]:
-     t0 = time.time()
-     reply_id = str(uuid.uuid4())
-     start_msg_id = str(uuid.uuid4())
-     # message_start
-     start_sm = ServerMessage(
-         type=MESSAGE_TYPE_MESSAGE_START,
-         session_id=session_id,
-         query_msg_id=query_msg_id,
-         reply_id=reply_id,
-         msg_id=start_msg_id,
-         sequence_id=sequence_id_start,
-         finish=True,
-         content=ServerMessageContent(
-             message_start=MessageStartDetail(
-                 local_msg_id=local_msg_id, msg_id=query_msg_id, execute_id=run_id
-             )
-         ),
-         log_id=log_id,
-     )
-     yield start_sm
-     next_seq = sequence_id_start + 1
-     last_seq = sequence_id_start
-     try:
-         # body stream
-         for sm in _iter_body_to_server_messages(
-             items,
-             session_id=session_id,
-             query_msg_id=query_msg_id,
-             reply_id=reply_id,
-             sequence_id_start=next_seq,
-             log_id=log_id,
-         ):
-             yield sm
-             last_seq = sm.sequence_id
-
-         # message_end
-         t_ms = int((time.time() - t0) * 1000)
-         end_sm = ServerMessage(
-             type=MESSAGE_TYPE_MESSAGE_END,
-             session_id=session_id,
-             query_msg_id=query_msg_id,
-             reply_id=reply_id,
-             msg_id=str(uuid.uuid4()),
-             sequence_id=last_seq + 1,
-             finish=True,
-             content=ServerMessageContent(
-                 message_end=MessageEndDetail(
-                     code=MESSAGE_END_CODE_SUCCESS,
-                     message="",
-                     token_cost=TokenCost(input_tokens=0, output_tokens=0, total_tokens=0),
-                     time_cost_ms=t_ms,
-                 )
-             ),
-             log_id=log_id,
-         )
-         yield end_sm
-     except Exception as ex:
-         # Use the error classifier to derive the error code
-         err = classify_error(ex, {"node_name": "stream"})
-         # message_end
-         t_ms = int((time.time() - t0) * 1000)
-         end_sm = ServerMessage(
-             type=MESSAGE_TYPE_MESSAGE_END,
-             session_id=session_id,
-             query_msg_id=query_msg_id,
-             reply_id=reply_id,
-             msg_id=str(uuid.uuid4()),
-             sequence_id=last_seq + 1,
-             finish=True,
-             content=ServerMessageContent(
-                 message_end=MessageEndDetail(
-                     code=str(err.code),
-                     message=err.message,
-                     time_cost_ms=t_ms,
-                     token_cost=TokenCost(input_tokens=0, output_tokens=0, total_tokens=0),
-                 )
-             ),
-             log_id=log_id,
-         )
-         yield end_sm
-
-
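`iter_server_messages` wraps the body stream in a message_start/message_end envelope and, on failure, still closes the stream with a classified error code. A sketch (the expected type values are the MESSAGE_TYPE_* constants imported above):

    import uuid
    from langchain_core.messages import AIMessage
    from coze_coding_utils.helper.agent_helper import iter_server_messages

    items = iter([(AIMessage(content="hi"), {})])
    for sm in iter_server_messages(
        items,
        session_id="s1",
        query_msg_id="q1",
        local_msg_id="m1",
        run_id=str(uuid.uuid4()),
        log_id="log-1",
    ):
        print(sm.sequence_id, sm.type)
    # Expected: sequence 1 message_start, 2 answer, 3 message_end.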
- def agent_iter_server_messages(
-     items: Iterator[Tuple[Any, Dict[str, Any]]],
-     *,
-     session_id: str,
-     query_msg_id: str,
-     local_msg_id: str,
-     run_id: str,
-     log_id: str,
- ) -> Iterator[ServerMessage]:
-     return iter_server_messages(
-         items,
-         session_id=session_id,
-         query_msg_id=query_msg_id,
-         local_msg_id=local_msg_id,
-         run_id=run_id,
-         sequence_id_start=1,
-         log_id=log_id,
-     )