coze-coding-utils 0.2.1__py3-none-any.whl → 0.2.2a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coze_coding_utils/__init__.py +1 -1
- coze_coding_utils/error/__init__.py +31 -0
- coze_coding_utils/error/classifier.py +320 -0
- coze_coding_utils/error/codes.py +356 -0
- coze_coding_utils/error/exceptions.py +439 -0
- coze_coding_utils/error/patterns.py +939 -0
- coze_coding_utils/error/test_classifier.py +0 -0
- coze_coding_utils/file/__init__.py +0 -0
- coze_coding_utils/file/file.py +327 -0
- coze_coding_utils/helper/__init__.py +0 -0
- coze_coding_utils/helper/agent_helper.py +599 -0
- coze_coding_utils/helper/graph_helper.py +231 -0
- coze_coding_utils/log/__init__.py +0 -0
- coze_coding_utils/log/common.py +8 -0
- coze_coding_utils/log/config.py +10 -0
- coze_coding_utils/log/err_trace.py +88 -0
- coze_coding_utils/log/loop_trace.py +72 -0
- coze_coding_utils/log/node_log.py +487 -0
- coze_coding_utils/log/parser.py +255 -0
- coze_coding_utils/log/write_log.py +183 -0
- coze_coding_utils/messages/__init__.py +0 -0
- coze_coding_utils/messages/client.py +48 -0
- coze_coding_utils/messages/server.py +173 -0
- coze_coding_utils/openai/__init__.py +5 -0
- coze_coding_utils/openai/converter/__init__.py +6 -0
- coze_coding_utils/openai/converter/request_converter.py +165 -0
- coze_coding_utils/openai/converter/response_converter.py +467 -0
- coze_coding_utils/openai/handler.py +298 -0
- coze_coding_utils/openai/types/__init__.py +37 -0
- coze_coding_utils/openai/types/request.py +24 -0
- coze_coding_utils/openai/types/response.py +178 -0
- {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/METADATA +2 -2
- coze_coding_utils-0.2.2a1.dist-info/RECORD +37 -0
- coze_coding_utils-0.2.1.dist-info/RECORD +0 -7
- {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/WHEEL +0 -0
- {coze_coding_utils-0.2.1.dist-info → coze_coding_utils-0.2.2a1.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,487 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import logging
|
|
3
|
+
from uuid import UUID
|
|
4
|
+
from openai import BaseModel
|
|
5
|
+
from coze_coding_utils.log.config import LOG_DIR
|
|
6
|
+
from coze_coding_utils.log.common import get_execute_mode, is_prod
|
|
7
|
+
import uuid
|
|
8
|
+
from langchain_core.callbacks import BaseCallbackHandler
|
|
9
|
+
from coze_coding_utils.runtime_ctx.context import Context
|
|
10
|
+
import os
|
|
11
|
+
import sys
|
|
12
|
+
import json
|
|
13
|
+
from typing import Dict, Optional, Any
|
|
14
|
+
from pydantic import BaseModel
|
|
15
|
+
from coze_coding_utils.log.parser import LangGraphParser
|
|
16
|
+
import asyncio
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class ParamInfo:
    """Schema descriptor for a single workflow parameter.

    NOTE(review): these are bare class-level annotations (not a dataclass or
    pydantic model), so instances get no generated __init__ and only the three
    defaulted attributes exist as class attributes — confirm intended usage.
    """
    name: str  # parameter name
    ptype: str  # parameter type
    optional: bool  # whether the parameter is optional
    description: Optional[str] = None  # human-readable parameter description
    items: Optional['ParamInfo'] = None  # element schema, used for array types
    default: Optional[Any] = None  # default value, used for primitive types
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# 2. Ensure the log directory exists.
# Prefer the configured LOG_DIR; if it is not writable, fall back to /tmp.
try:
    LOG_FILE = os.path.join(LOG_DIR, 'app.log')
    # Probe write permission by opening in append mode (creates the file).
    with open(LOG_FILE, 'a') as f:
        pass
except Exception as e:
    # LOG_DIR was not writable; use a /tmp fallback instead.
    # Make sure we create the directory, not a file path.
    FALLBACK_LOG_DIR = '/tmp/work/logs/bypass'
    os.makedirs(FALLBACK_LOG_DIR, exist_ok=True)
    LOG_FILE = os.path.join(FALLBACK_LOG_DIR, 'app.log')
    print(f"Warning: Using fallback log directory: {FALLBACK_LOG_DIR}, due to error: {e}", flush=True)

# 3. Configure logging for console output only (file writes go through write_log).
logging.basicConfig(
    level=logging.INFO,
    format='%(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)

# Module logger instance (console output only).
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _append_line(line: str) -> None:
    """Append *line* to LOG_FILE and force it to disk (flush + fsync)."""
    # Line-buffered mode; the explicit flush/fsync below guarantee durability.
    with open(LOG_FILE, 'a', encoding='utf-8', buffering=1) as f:
        f.write(line + '\n')
        f.flush()
        os.fsync(f.fileno())


def write_log(log_entry):
    """
    Write one JSON log record directly to LOG_FILE, ensuring it reaches disk.

    :param log_entry: log dict in the expected format (see create_log_entry)
    """
    try:
        if is_prod():
            # Skip file logging in production until cleanup support exists.
            return None
        log_json = json.dumps(log_entry, ensure_ascii=False)
        _append_line(log_json)

        # Mirror the message to the console for debugging.
        level = log_entry.get('level', 'info').lower()
        log_method = getattr(logger, level, logger.info)
        log_method(log_entry.get('message', ''))

    except Exception as e:
        # The write failed: report it, then retry once as a best-effort fallback.
        print(f"Failed to write log: {e}", flush=True)
        try:
            log_json = json.dumps(log_entry, ensure_ascii=False)
            print(f"Attempting fallback write: {log_json}", flush=True)
            # Previously duplicated the write with a manual open/try/finally;
            # the shared helper uses a context manager instead.
            _append_line(log_json)
        except Exception as fallback_e:
            print(f"Fallback log write failed: {fallback_e}", flush=True)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def create_log_entry(level="info", message="", timestamp=None, log_id=None, latency=0,
                     input_data="", output_data="", node_id="", project_id="", commit_id="",
                     execute_mode="run", caller="", node_type="", node_title="",
                     token="", cost="", error_code="", error_message="", event_type="", execution_id="", node_name="",
                     method=""):
    """
    Build one log record dict in the required format.

    :param level: log level
    :param message: log message text
    :param timestamp: epoch millis; defaults to the current time
    :param log_id: log id; typically a request log id
    :param latency: latency in milliseconds
    :param input_data: serialized input payload
    :param output_data: serialized output payload
    :param node_id: node id
    :param project_id: project id
    :param commit_id: commit id
    :param execute_mode: execution mode ("run" / "test_run")
    :param caller: caller identifier
    :param node_type: node type
    :param node_title: node title
    :param token: token usage info
    :param cost: cost info
    :param error_code: error code
    :param error_message: error message
    :param event_type: event type (stored under the "type" key)
    :param execution_id: unique execution id (stored under "execute_id")
    :param node_name: node name
    :param method: method name
    :return: formatted log dict
    """
    when = int(time.time() * 1000) if timestamp is None else timestamp
    # Cap input/output payloads at 1MB; oversized values are replaced with a
    # fixed placeholder string rather than stored.
    limit = 1024 * 1024
    if len(input_data) > limit:
        input_data = "输入数据长度超过1MB,已截断"
    if len(output_data) > limit:
        output_data = "输出数据长度超过1MB,已截断"
    return dict(
        level=level,
        message=message,
        timestamp=when,
        log_id=log_id,
        latency=latency,
        input=input_data,
        output=output_data,
        node_id=node_id,
        project_id=project_id,
        commit_id=commit_id,
        execute_mode=execute_mode,
        caller=caller,
        node_type=node_type,
        node_title=node_title,
        token=token,
        cost=cost,
        error_code=error_code,
        error_message=error_message,
        type=event_type,
        execute_id=execution_id,
        node_name=node_name,
        method=method,
    )
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def log_workflow_start(project_id, commit_id, log_id=None, execute_id="", input_data="", method=""):
    """
    Record the workflow-start log entry.

    :param project_id: project ID
    :param commit_id: commit ID
    :param log_id: log ID (optional)
    :param execute_id: unique execution ID (optional)
    :param input_data: serialized workflow input
    :param method: method name
    """
    # Outside of production every run counts as a test run.
    test_run = not is_prod()
    event_type = "test_run_start" if test_run else "run_start"
    execute_mode = "test_run" if test_run else "run"

    # Bug fix: the message condition was inverted ('Test Run' if is_prod()),
    # contradicting event_type/execute_mode above.
    message = f"Workflow started - {'Test Run' if test_run else 'Run'}"

    log_entry = create_log_entry(
        level="info",
        message=message,
        log_id=log_id,
        project_id=project_id,
        commit_id=commit_id,
        execute_mode=execute_mode,
        event_type=event_type,
        execution_id=execute_id,
        input_data=input_data,
        method=method,
    )

    write_log(log_entry)
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def log_workflow_end(execution_id, output=None, total_time=None, status="success", token_consumed=None,
                     error_reason=None, error_code=None, is_test_run=False, log_id="", method=""):
    """
    Record the workflow-end log entry.

    :param execution_id: unique execution ID
    :param output: workflow output
    :param total_time: workflow duration in seconds
    :param status: workflow completion status
    :param token_consumed: tokens consumed by the workflow
    :param error_reason: error reason
    :param error_code: error code
    :param is_test_run: whether this was a test run
    :param log_id: log ID
    :param method: method name
    """
    failed = status == "error"
    mode = "test_run" if is_test_run else "run"

    message = f"Workflow completed - {status}"
    if execution_id:
        message = message + f" (ID: {execution_id})"

    write_log(create_log_entry(
        level="error" if failed else "info",
        message=message,
        latency=int(total_time * 1000) if total_time else 0,
        output_data=_serialize_data(output),
        execute_mode=mode,
        event_type="test_run_done" if is_test_run else "done",
        token=str(token_consumed) if token_consumed else "",
        error_code=str(error_code) if error_code else "",
        error_message=str(error_reason) if error_reason else "",
        execution_id=execution_id,
        log_id=log_id,
        method=method,
    ))
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
class Logger(BaseCallbackHandler):
    """LangChain callback handler that turns LangGraph chain events into
    structured workflow/node log entries written through write_log.

    Node metadata (id, type, title) is resolved via LangGraphParser; run ids
    are mapped back to node names so end/error callbacks can be attributed.
    """

    # Maps a callback run_id to the node name recorded at chain start.
    # Bug fix: this used to be a class-level mutable default ({}), shared by
    # every Logger instance, so concurrent workflows could pop each other's
    # entries. The dict is now created per instance in __init__.
    run_id_map: Dict[uuid.UUID, str]

    def __init__(self, graph, ctx: Context):
        self.root_run_id = None
        self.graph = graph
        self.runtime_ctx = ctx
        self.start_time = time.time()
        self.parser = LangGraphParser(graph)
        self.run_id_map = {}

    def on_chain_start_graph(
        self,
        serialized: dict[str, Any],
        inputs: dict[str, Any],
        *,
        run_id: uuid.UUID,
        parent_run_id: uuid.UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Record a node-start (or workflow-start, for the root run) entry."""
        if metadata is None:
            metadata = {}
        node_name_value = kwargs.get("name")
        node_name: str | None = node_name_value if isinstance(node_name_value, str) else None
        if node_name:
            self.run_id_map[run_id] = node_name
        if parent_run_id is None:
            self._on_graph_start(inputs)  # workflow start
        node_info = self.parser.nodes.get(node_name) if node_name is not None else None
        if node_info is None:
            # Not a regular node — check whether it is a condition function.
            if node_name in self.parser.condition_funcs:
                # Log the condition-node start.
                log_entry = create_log_entry(
                    level="info",
                    message=f"Condition node '{node_name}' started",
                    input_data=_serialize_data(inputs),
                    node_name=self.parser.condition_funcs[node_name]["cond_node_name"],  # frontend condition-node name
                    execution_id=self.runtime_ctx.run_id,
                    execute_mode=get_execute_mode(),
                    event_type="node_start",
                    log_id=self.runtime_ctx.logid,
                    method=self.runtime_ctx.method,
                    node_type="condition"
                )
                write_log(log_entry)
                return
            logger.debug(f"Node {node_name} not found in graph")
            return
        log_entry = create_log_entry(
            level="info",
            message=f"Node '{node_info.name}' started",
            input_data=_serialize_data(inputs),
            node_id=node_info.node_id,
            node_type=node_info.node_type,
            node_title=node_info.title,
            execute_mode=get_execute_mode(),
            event_type="node_start",
            execution_id=self.runtime_ctx.run_id,
            log_id=self.runtime_ctx.logid,
            node_name=node_info.name,
            method=self.runtime_ctx.method,
        )
        write_log(log_entry)

    def on_chain_end_graph(
        self,
        outputs: dict[str, Any],
        *,
        run_id: uuid.UUID,
        parent_run_id: uuid.UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Record a node-end (or workflow-end, for the root run) entry."""
        node_name = self.run_id_map.pop(run_id, None)
        if parent_run_id is None:  # root run: the whole workflow finished
            self._on_graph_end(outputs)
        elif node_name:
            # Node end
            node_info = self.parser.nodes.get(node_name, None)
            if node_info is None:
                # Not a regular node — check whether it is a condition function.
                if node_name in self.parser.condition_funcs:
                    # Log the condition-node end.
                    log_entry = create_log_entry(
                        level="info",
                        message=f"Condition node '{node_name}' ended",
                        output_data=_serialize_data(outputs),
                        node_name=self.parser.condition_funcs[node_name]["cond_node_name"],  # frontend condition-node name
                        execution_id=self.runtime_ctx.run_id,
                        execute_mode=get_execute_mode(),
                        event_type="node_end",
                        log_id=self.runtime_ctx.logid,
                        method=self.runtime_ctx.method,
                        node_type="condition"
                    )
                    write_log(log_entry)
                    return
                logger.debug(f"Node {node_name} not found in graph")
                return
            log_entry = create_log_entry(
                level="info",
                message=f"Node '{node_info.name}' ended",
                output_data=_serialize_data(outputs),
                node_id=node_info.node_id,  # function name used at registration; the frontend uses it for flow tracking
                node_type=node_info.node_type,
                node_title=node_info.title,
                execute_mode=get_execute_mode(),
                event_type="node_end",
                execution_id=self.runtime_ctx.run_id,
                log_id=self.runtime_ctx.logid,
                node_name=node_info.name,
                method=self.runtime_ctx.method,
            )
            write_log(log_entry)

    def _on_graph_start(self, inputs: Dict[str, Any]):
        """Emit the workflow-start record for the root chain."""
        project_id = os.getenv("COZE_PROJECT_ID", "")
        commit_id = ""  # This might need to be sourced from metadata if available
        log_workflow_start(
            project_id=project_id,
            commit_id=commit_id,
            log_id=str(self.runtime_ctx.logid),
            execute_id=self.runtime_ctx.run_id,
            input_data=_serialize_data(inputs),
            method=self.runtime_ctx.method,
        )

    def _on_graph_end(self, outputs: Dict[str, Any]):
        """Emit the workflow-end record for the root chain."""
        total_time = time.time() - self.start_time
        log_workflow_end(
            execution_id=self.runtime_ctx.run_id,
            output=outputs,
            total_time=total_time,
            status="success",
            log_id=self.runtime_ctx.logid,
            is_test_run=not is_prod(),
            method=self.runtime_ctx.method,
        )

    def on_chain_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Record a node/workflow error entry (cancellations become "cancel")."""
        event_type = "error"

        # A cancelled task is logged as "cancel" rather than "error".
        if isinstance(error, asyncio.CancelledError):
            logger.info(f"Task cancelled for run_id: {run_id}")
            event_type = "cancel"
        # Resolve the failing node, if one was recorded at chain start.
        node_name = self.run_id_map.pop(run_id, "")
        node_id = ""
        node_title = ""
        node_type = ""
        node_info = self.parser.nodes.get(node_name) if node_name is not None else None
        if node_info is not None:
            node_name = node_info.name if node_info else ""
            node_title = node_info.title if node_info else ""
            node_id = node_info.node_id if node_info else ""
            node_type = node_info.node_type if node_info else ""
        # Node failure entry.
        error_log_entry = create_log_entry(
            level="error",
            message=f"Workflow {node_id} ended with error",
            node_id=node_id,
            node_type=node_type,
            node_title=node_title,
            execute_mode=get_execute_mode(),
            event_type=event_type,
            execution_id=self.runtime_ctx.run_id,
            log_id=self.runtime_ctx.logid,
            error_message=str(error),
            node_name=node_name,
            method=self.runtime_ctx.method,
        )
        write_log(error_log_entry)

    def get_node_tags(self, node_name: str) -> dict[str, str]:
        """Return id/type/title/name tags for *node_name*, or {} if unknown."""
        node_tags = {}
        if node_name is None or node_name == "":
            return node_tags

        node_info = self.parser.nodes.get(node_name, None)
        if node_info is None:
            logger.debug(f"Node {node_name} not found in graph")
            return {}
        node_tags["node_id"] = node_info.node_id
        node_tags["node_type"] = self.parser.get_node_type(node_info.node_id)
        node_tags["node_title"] = node_info.title
        node_tags["node_name"] = node_info.name
        return node_tags

    def get_node_name(self, node_name: str) -> str:
        """Return the display title for *node_name*, falling back to the name."""
        if node_name == "LangGraph":
            return "Workflow"
        node_info = self.parser.nodes.get(node_name, None)
        if node_info is None:
            logger.debug(f"Node {node_name} not found in graph")
            return node_name
        node_title = node_info.title if node_info else node_name  # use node_name when there is no title
        return node_title
|
|
441
|
+
|
|
442
|
+
|
|
443
|
+
def _serialize_data(data: Any) -> str:
    """
    Serialize arbitrary data to a JSON string. Supports:
    - pydantic BaseModel instances (via model_dump)
    - dicts / lists / tuples and other basic types
    - custom objects (serialized through their __dict__)
    Non-ASCII characters are preserved as-is (ensure_ascii=False).
    (Previous docstring claimed ASCII output, contradicting the code.)
    """

    def _recursive_serialize(item: Any):
        """Recursively convert one value into JSON-serializable primitives."""
        # pydantic model: convert to a plain dict first
        if isinstance(item, BaseModel):
            return item.model_dump()

        # list / tuple
        elif isinstance(item, (list, tuple)):
            return [_recursive_serialize(sub_item) for sub_item in item]

        # dict
        elif isinstance(item, dict):
            return {key: _recursive_serialize(value) for key, value in item.items()}

        # custom object exposing __dict__
        elif hasattr(item, '__dict__') and not isinstance(item, (str, int, float, bool, type(None))):
            return _recursive_serialize(item.__dict__)

        # primitive: return unchanged
        else:
            return item

    try:
        # First reduce the data to JSON-serializable primitives,
        # then serialize to a JSON string.
        serialized_data = _recursive_serialize(data)
        return json.dumps(serialized_data, ensure_ascii=False, indent=None)

    except Exception as e:
        logger.error(f"Error serializing data: {e}", exc_info=True)
        # Degraded path: fall back to str(); computed once (was computed twice).
        fallback = str(data)
        if len(fallback) > 1000:
            # Avoid returning huge payloads (e.g. bytes) that could choke the logging thread.
            logger.info(f"Data too large and truncated, len={len(fallback)}")
            return ""
        return fallback
|