botrun-flow-lang 5.10.82__py3-none-any.whl → 5.10.83__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +481 -481
  6. botrun_flow_lang/api/langgraph_api.py +796 -796
  7. botrun_flow_lang/api/line_bot_api.py +1357 -1357
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +316 -316
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  25. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +591 -548
  26. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  27. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  28. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  29. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  30. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  31. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  32. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
  33. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  34. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  35. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
  36. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  37. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  38. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  39. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  40. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  41. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  42. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  43. botrun_flow_lang/log/.gitignore +2 -2
  44. botrun_flow_lang/main.py +61 -61
  45. botrun_flow_lang/main_fast.py +51 -51
  46. botrun_flow_lang/mcp_server/__init__.py +10 -10
  47. botrun_flow_lang/mcp_server/default_mcp.py +711 -711
  48. botrun_flow_lang/models/nodes/utils.py +205 -205
  49. botrun_flow_lang/models/token_usage.py +34 -34
  50. botrun_flow_lang/requirements.txt +21 -21
  51. botrun_flow_lang/services/base/firestore_base.py +30 -30
  52. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  53. botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
  54. botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
  55. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  56. botrun_flow_lang/services/storage/storage_store.py +65 -65
  57. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  58. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  59. botrun_flow_lang/static/docs/tools/index.html +926 -926
  60. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  61. botrun_flow_lang/tests/api_stress_test.py +357 -357
  62. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  63. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  64. botrun_flow_lang/tests/test_html_util.py +31 -31
  65. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  66. botrun_flow_lang/tests/test_img_util.py +39 -39
  67. botrun_flow_lang/tests/test_local_files.py +114 -114
  68. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  69. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  70. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  71. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  72. botrun_flow_lang/tools/generate_docs.py +133 -133
  73. botrun_flow_lang/tools/templates/tools.html +153 -153
  74. botrun_flow_lang/utils/__init__.py +7 -7
  75. botrun_flow_lang/utils/botrun_logger.py +344 -344
  76. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  77. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  78. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  79. botrun_flow_lang/utils/langchain_utils.py +324 -324
  80. botrun_flow_lang/utils/yaml_utils.py +9 -9
  81. {botrun_flow_lang-5.10.82.dist-info → botrun_flow_lang-5.10.83.dist-info}/METADATA +3 -2
  82. botrun_flow_lang-5.10.83.dist-info/RECORD +99 -0
  83. botrun_flow_lang-5.10.82.dist-info/RECORD +0 -99
  84. {botrun_flow_lang-5.10.82.dist-info → botrun_flow_lang-5.10.83.dist-info}/WHEEL +0 -0
botrun_flow_lang/utils/langchain_utils.py
@@ -1,324 +1,324 @@
from typing import Dict, List, Any, Optional
from langchain_core.messages import (
    BaseMessage,
    SystemMessage,
    HumanMessage,
    AIMessage,
    message_to_dict,
)
import json
from botrun_flow_lang.models.token_usage import TokenUsage, NodeUsage, ToolUsage


def litellm_msgs_to_langchain_msgs(
    msgs: List[Dict], enable_prompt_caching: bool = False
) -> List[BaseMessage]:
    """
    Convert LiteLLM-style messages to LangChain messages.

    Args:
        msgs: List of dictionaries with 'role' and 'content' keys
        enable_prompt_caching: Whether to enable prompt caching (Anthropic only)

    Returns:
        List of LangChain message objects
    """
    converted_msgs = []
    for msg in msgs:
        role = msg["role"]
        content = msg["content"]

        if role == "system":
            if enable_prompt_caching:
                converted_msgs.append(
                    SystemMessage(
                        content=[
                            {
                                "text": content,
                                "type": "text",
                                "cache_control": {"type": "ephemeral"},
                            }
                        ]
                    )
                )
            else:
                converted_msgs.append(SystemMessage(content=content))
        elif role == "user":
            if enable_prompt_caching and isinstance(content, str):
                converted_msgs.append(
                    HumanMessage(
                        content=[
                            {
                                "text": content,
                                "type": "text",
                                "cache_control": {"type": "ephemeral"},
                            }
                        ]
                    )
                )
            elif enable_prompt_caching and isinstance(content, list):
                for item in content:
                    converted_msgs.append(
                        HumanMessage(
                            content=[
                                {
                                    "text": item.get("text", ""),
                                    "type": "text",
                                    "cache_control": {"type": "ephemeral"},
                                }
                            ]
                        )
                    )
            elif content != "":
                converted_msgs.append(HumanMessage(content=content))
        elif role == "assistant":
            if enable_prompt_caching:
                converted_msgs.append(
                    AIMessage(
                        content=[
                            {
                                "text": content,
                                "type": "text",
                                "cache_control": {"type": "ephemeral"},
                            }
                        ]
                    )
                )
            elif content != "":
                converted_msgs.append(AIMessage(content=content))
        else:
            raise ValueError(f"Unsupported role: {role}")

    return converted_msgs
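
A minimal usage sketch of the converter above (an editor's illustration, not part of the released file; the message payloads are invented):

from botrun_flow_lang.utils.langchain_utils import litellm_msgs_to_langchain_msgs

msgs = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize this thread."},
    {"role": "assistant", "content": "Sure, here is a summary."},
]

# Plain conversion: each dict becomes the matching LangChain message class.
converted = litellm_msgs_to_langchain_msgs(msgs)

# With enable_prompt_caching=True (Anthropic only), each message body is wrapped
# in a text block carrying {"cache_control": {"type": "ephemeral"}}.
cached = litellm_msgs_to_langchain_msgs(msgs, enable_prompt_caching=True)
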
def langgraph_msgs_to_json(messages: List) -> List:
    new_messages = []
    for message in messages:
        if isinstance(message, BaseMessage):
            msg_dict = message_to_dict(message)
            new_messages.append(msg_dict)
        elif isinstance(message, list):
            inner_messages = []
            for inner_message in message:
                if isinstance(inner_message, BaseMessage):
                    inner_messages.append(message_to_dict(inner_message))
                else:
                    inner_messages.append(inner_message)
            new_messages.append(inner_messages)
        else:
            new_messages.append(message)
    return new_messages
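
For reference, a sketch of what langgraph_msgs_to_json does with a mixed history (editor's illustration; the values are invented):

from langchain_core.messages import AIMessage, HumanMessage
from botrun_flow_lang.utils.langchain_utils import langgraph_msgs_to_json

history = [HumanMessage(content="hi"), [AIMessage(content="hello"), "raw text"]]
as_json = langgraph_msgs_to_json(history)
# BaseMessage objects, including those one level inside nested lists, become
# plain dicts via message_to_dict; anything else passes through unchanged.
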
def convert_nested_structure(obj: Any) -> Any:
    """
    Recursively convert BaseMessage objects in nested dictionaries and lists.
    Always returns a new object without modifying the original.

    Args:
        obj: Any object that might contain BaseMessage objects

    Returns:
        A new object with all BaseMessage objects converted to dictionaries
    """
    if isinstance(obj, BaseMessage):
        return message_to_dict(obj)
    elif isinstance(obj, dict):
        return {key: convert_nested_structure(value) for key, value in obj.items()}
    elif isinstance(obj, list):
        return [convert_nested_structure(item) for item in obj]
    else:
        return obj
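
A short sketch of the copy semantics (editor's illustration; the state shape is invented):

from langchain_core.messages import AIMessage
from botrun_flow_lang.utils.langchain_utils import convert_nested_structure

state = {"steps": [{"reply": AIMessage(content="done")}], "retries": 2}
clean = convert_nested_structure(state)
# clean["steps"][0]["reply"] is now a plain dict, while the original state,
# including its nested containers, is left unmodified.
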
class LangGraphEventEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, BaseMessage):
            # Convert BaseMessage into a serializable dict
            return message_to_dict(obj)
        # For other non-serializable objects, fall back to a simple string form
        try:
            return super().default(obj)
        except Exception:
            return str(obj)
def langgraph_event_to_json(event: Dict) -> str:
    """
    Convert a LangGraph event to a JSON string, handling all nested BaseMessage objects.

    Args:
        event: Dictionary containing LangGraph event data

    Returns:
        JSON string representation of the event
    """
    # No prior deepcopy is needed: convert_nested_structure already creates
    # new objects, and the encoder below handles nested BaseMessage instances.
    # new_event = convert_nested_structure(event)
    return json.dumps(event, ensure_ascii=False, cls=LangGraphEventEncoder)
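
A sketch of serializing an event that embeds a message (editor's illustration; the event shape is invented):

from langchain_core.messages import AIMessage
from botrun_flow_lang.utils.langchain_utils import langgraph_event_to_json

event = {"node": "agent", "output": AIMessage(content="你好"), "attempt": 1}
print(langgraph_event_to_json(event))
# The custom encoder serializes the nested AIMessage (falling back to str()
# for other non-serializable values), and ensure_ascii=False keeps the
# non-ASCII content readable.
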
def extract_token_usage_from_state(
    state: Dict[str, Any], possible_model_name: Optional[str] = None
) -> TokenUsage:
    """
    Extract token usage information from the state and assemble it into a TokenUsage.

    Args:
        state: Graph state dictionary
        possible_model_name: Fallback model name; if a message carries its own
            model name that one is used, otherwise possible_model_name is used

    Returns:
        TokenUsage object containing structured token usage information
    """
    try:
        nodes_usage = []
        total_input_tokens = 0
        total_output_tokens = 0
        total_tokens = 0

        # Check whether the state carries messages
        if "messages" in state:
            messages = state["messages"]
            for i, message in enumerate(messages):
                if not isinstance(message, dict):
                    message = message_to_dict(message)

                # usage_metadata may sit under "data" or at the top level
                usage_metadata = None
                if isinstance(message.get("data"), dict):
                    usage_metadata = message["data"].get("usage_metadata")
                elif "usage_metadata" in message:
                    usage_metadata = message["usage_metadata"]

                if usage_metadata:
                    node_name = f"message_{i}"
                    if message.get("data", {}).get("id"):
                        node_name = message["data"]["id"]
                    elif message.get("id"):
                        node_name = message["id"]

                    # Extract model_name
                    model_name = None
                    if (
                        message.get("data", {})
                        .get("response_metadata", {})
                        .get("model_name")
                    ):
                        model_name = message["data"]["response_metadata"]["model_name"]
                    elif message.get("response_metadata", {}).get("model_name"):
                        model_name = message["response_metadata"]["model_name"]
                    if not model_name:
                        model_name = possible_model_name

                    # Extract tool_calls information
                    tools = []
                    if message.get("data", {}).get("tool_calls"):
                        for tool_call in message["data"]["tool_calls"]:
                            tools.append(
                                ToolUsage(
                                    tool_name=tool_call["name"],
                                    input_tokens=0,
                                    output_tokens=0,
                                    total_tokens=0,
                                )
                            )
                    elif message.get("tool_calls"):
                        for tool_call in message["tool_calls"]:
                            tools.append(
                                ToolUsage(
                                    tool_name=tool_call["name"],
                                    input_tokens=0,
                                    output_tokens=0,
                                    total_tokens=0,
                                )
                            )

                    node_usage = NodeUsage(
                        node_name=node_name,
                        model_name=model_name,
                        input_tokens=usage_metadata.get("input_tokens", 0),
                        output_tokens=usage_metadata.get("output_tokens", 0),
                        total_tokens=usage_metadata.get("total_tokens", 0),
                        tools=tools if tools else None,
                    )

                    # If input_token_details is present, keep it in metadata
                    if "input_token_details" in usage_metadata:
                        node_usage.metadata = {
                            "input_token_details": usage_metadata["input_token_details"]
                        }

                    nodes_usage.append(node_usage)
                    total_input_tokens += node_usage.input_tokens
                    total_output_tokens += node_usage.output_tokens
                    total_tokens += node_usage.total_tokens

        # Walk the remaining nodes in the state
        for key, value in state.items():
            if (
                isinstance(value, dict)
                and "usage_metadata" in value
                and key != "messages"
            ):
                node_usage = NodeUsage(
                    node_name=key,
                    input_tokens=value["usage_metadata"].get("input_tokens", 0),
                    output_tokens=value["usage_metadata"].get("output_tokens", 0),
                    total_tokens=value["usage_metadata"].get("total_tokens", 0),
                )

                # Include tool usage information when present
                if "tools_usage" in value["usage_metadata"]:
                    tools = []
                    for tool_name, tool_usage in value["usage_metadata"][
                        "tools_usage"
                    ].items():
                        tools.append(
                            ToolUsage(
                                tool_name=tool_name,
                                input_tokens=tool_usage.get("input_tokens", 0),
                                output_tokens=tool_usage.get("output_tokens", 0),
                                total_tokens=tool_usage.get("total_tokens", 0),
                                metadata=tool_usage.get("metadata", None),
                            )
                        )
                    node_usage.tools = tools

                nodes_usage.append(node_usage)
                total_input_tokens += node_usage.input_tokens
                total_output_tokens += node_usage.output_tokens
                total_tokens += node_usage.total_tokens

        # Return a TokenUsage even when no usage information was found
        return TokenUsage(
            total_input_tokens=total_input_tokens,
            total_output_tokens=total_output_tokens,
            total_tokens=total_tokens,
            nodes=nodes_usage,
        )
    except Exception as e:
        import traceback

        traceback.print_exc()
        print(f"Error extracting token usage: {str(e)}")
        # Return an empty TokenUsage on error
        return TokenUsage(
            total_input_tokens=0,
            total_output_tokens=0,
            total_tokens=0,
            nodes=[],
        )
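
Finally, a hedged sketch of feeding extract_token_usage_from_state a state whose message already carries usage_metadata (editor's illustration; the id, counts, and model-name string are invented, and the TokenUsage field names follow the listing above):

from botrun_flow_lang.utils.langchain_utils import extract_token_usage_from_state

state = {
    "messages": [
        {
            "id": "run-123",
            "usage_metadata": {
                "input_tokens": 42,
                "output_tokens": 7,
                "total_tokens": 49,
            },
            "response_metadata": {"model_name": "claude-sonnet"},
        }
    ]
}
usage = extract_token_usage_from_state(state, possible_model_name="fallback-model")
# Yields one NodeUsage named "run-123" with model "claude-sonnet", and
# usage.total_tokens == 49.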