botrun-flow-lang 5.9.301__py3-none-any.whl → 5.10.82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +481 -481
  6. botrun_flow_lang/api/langgraph_api.py +796 -796
  7. botrun_flow_lang/api/line_bot_api.py +1357 -1357
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +316 -316
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  25. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +548 -542
  26. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  27. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  28. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  29. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  30. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  31. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  32. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
  33. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  34. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  35. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
  36. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  37. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  38. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  39. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  40. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  41. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  42. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  43. botrun_flow_lang/log/.gitignore +2 -2
  44. botrun_flow_lang/main.py +61 -61
  45. botrun_flow_lang/main_fast.py +51 -51
  46. botrun_flow_lang/mcp_server/__init__.py +10 -10
  47. botrun_flow_lang/mcp_server/default_mcp.py +711 -711
  48. botrun_flow_lang/models/nodes/utils.py +205 -205
  49. botrun_flow_lang/models/token_usage.py +34 -34
  50. botrun_flow_lang/requirements.txt +21 -21
  51. botrun_flow_lang/services/base/firestore_base.py +30 -30
  52. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  53. botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
  54. botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
  55. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  56. botrun_flow_lang/services/storage/storage_store.py +65 -65
  57. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  58. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  59. botrun_flow_lang/static/docs/tools/index.html +926 -926
  60. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  61. botrun_flow_lang/tests/api_stress_test.py +357 -357
  62. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  63. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  64. botrun_flow_lang/tests/test_html_util.py +31 -31
  65. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  66. botrun_flow_lang/tests/test_img_util.py +39 -39
  67. botrun_flow_lang/tests/test_local_files.py +114 -114
  68. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  69. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  70. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  71. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  72. botrun_flow_lang/tools/generate_docs.py +133 -133
  73. botrun_flow_lang/tools/templates/tools.html +153 -153
  74. botrun_flow_lang/utils/__init__.py +7 -7
  75. botrun_flow_lang/utils/botrun_logger.py +344 -344
  76. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  77. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  78. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  79. botrun_flow_lang/utils/langchain_utils.py +324 -324
  80. botrun_flow_lang/utils/yaml_utils.py +9 -9
  81. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/METADATA +2 -2
  82. botrun_flow_lang-5.10.82.dist-info/RECORD +99 -0
  83. botrun_flow_lang-5.9.301.dist-info/RECORD +0 -99
  84. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py
@@ -1,864 +1,864 @@ (the removed and re-added lines are byte-identical, so the file content is listed once below)
from datetime import datetime
from langchain_core.messages import SystemMessage, AIMessage, HumanMessage, ToolMessage

from langgraph.graph import MessagesState

from pydantic import BaseModel
from langgraph.checkpoint.memory import MemorySaver
from langgraph.checkpoint.base import BaseCheckpointSaver

# from langchain_anthropic import ChatAnthropic
# from langchain_openai import ChatOpenAI

from langgraph.graph import StateGraph, START, END

from langchain_core.language_models.chat_models import ChatGenerationChunk
from langchain_core.messages import AIMessageChunk
from langchain_core.runnables.config import (
    ensure_config,
    get_async_callback_manager_for_config,
    RunnableConfig,
)

from botrun_flow_lang.langgraph_agents.agents.util.perplexity_search import (
    respond_with_perplexity_search,
)


import os
import time
import logging
from dotenv import load_dotenv

load_dotenv()

# configure logging to show timestamp and level
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

REQUIREMENT_NODE = "requirement_node"
ADD_REQUIREMENT_TOOL_MESSAGE_NODE = "add_requirement_tool_message"
SEARCH_NODE = "search_node"
RELATED_NODE = "related_node"
NORMAL_CHAT_NODE = "normal_chat_node"

REQUIREMENT_PROMPT_TEMPLATE = """
妳是臺灣人,回答要用臺灣繁體中文正式用語,需要的時候也可以用英文,可以親切,但不能隨便輕浮。在使用者合理的要求下請盡量配合他的需求,不要隨便拒絕
你要參考以下的 <原系統提示工程> 來判斷是否需要上網搜尋,你會參考<原系統提示工程>的範圍,超過此範圍,就不會上網搜尋,可以參考 <範例1>,<範例2>。
你的任務就是要判斷:
- 是否要幫使用者上網搜尋(有/沒有)

<範例1>
使用者提問:可以幫我寫python遞迴的程式嗎?
系統提示工程:你會幫忙找政府的津貼
思考:因為寫程式跟津貼無關,因此我不會回覆跟程式有關的內容。
回覆:我無法處理你的需求
</範例1>

<範例2>
使用者提問:可以幫我寫一個 COSTAR 的新聞稿嗎?
系統提示工程:你會幫忙找政府的津貼
思考:因為寫新聞稿跟津貼無關,因此我不會回覆跟寫新聞稿有關的內容。
回覆:我無法處理你的需求
</範例2>

其它有搜尋需求的範例:
- 留學獎學金可以申請多少?
- 我上個月十月剛從馬偕醫院離職,我可以領勞保生育補助嗎
- 請問我有個兩歲的孩子,可以領育兒補助到幾歲?

其它沒有需求的範例:
- hi
- 你好
- 我是一個人
- 你叫什麼名字?
- 你今年幾歲?
- 你住在哪裡?
- 你喜歡吃什麼?
- 你喜歡做什麼?

請遵守以下規則:
- 瞭解使用者「有」需求之後,你不會跟他說類似「讓我先確認您是否有提出具體的需求。」、「需要更多細節」、「讓我先確認這個需求。」、「讓我先確認一下您是否已經提出具體的需求」、「我需要先了解一下您的情況。」的句子,你只會說「已經收到他的oo需求,讓我來處理」,但是你不會真的回覆。
- 你不會直接回覆使用者的需求,你只會說已經收到,會開始幫他研究。
- 你不會說任何有關「讓我先確認您是否有提出具體的需求。」、「讓我先確認您是否已經提出明確的需求」, 「讓我確認一下您目前是否有任何具體的需求。」的類似句子,如果你判斷他沒有提出需求,就直接先跟他聊天。
- 你不會跟使用者要更多的資訊

"""

default_search_prompt = """
001 你只會使用臺灣人習慣的語詞和繁體中文,採用正式但親和的語氣,以維護專業性並表達尊重。
002 妳會保持溫暖且親切的語氣,溫暖人心的護理師大姊姊,讓使用者在溝通時感受到支援和關愛。
003 妳會運用同理心來理解使用者的處境,特別是在討論敏感話題(如經濟困難)時,以謹慎和關懷的態度進行應,讓使用者感受到被傾聽和理解。
004 請你使用清晰的格式呈現資訊,如項目符號或編號列表,以提高可讀性。
005 請你在結尾附上參考來源,包含參考來源的名稱,以及參考來源的hyperlinks,以利使用者查詢。
"""

search_prompt = """
現在的西元時間:{western_date}
現在的民國時間:{taiwan_date}

{prompt}
"""

DEFAULT_RELATED_PROMPT = """
你是一個專業的助手,請根據使用者的原始問題以及之前的回答內容,提供 3-5 個相關的後續問題建議。
這些問題應該:
1. 與原始問題和回答內容相關
2. 能夠幫助使用者更深入了解相關的補助或福利
3. 涵蓋不同面向,但都要與福利補助有關
4. 使用繁體中文提問
5. 每個問題都要簡潔明瞭,不超過 30 個字

"""

related_question_text = """

使用者的原始問題是:
{original_question}

之前的回答內容是:
{previous_response}

請提供相關的後續問題建議。
"""

NORMAL_CHAT_PROMPT_TEXT = """
妳是臺灣人,回答要用臺灣繁體中文正式用語,需要的時候也可以用英文,可以親切、但不能隨便輕浮。在使用者合理的要求下請盡量配合他的需求,不要隨便拒絕
你本來是要參考以下的 <原系統提示工程> 來回答問題,但是因為判斷不需要上網搜尋,所以你可以直接回覆使用者,你還是會參考<系統提示工程>的範圍,超過此範圍,你會跟使用者說無法回覆,但是不會按照<系統提示工程>的格式來回覆,可以參考 <範例1>,<範例2>。

<範例1>
使用者提問:可以幫我寫python遞迴的程式嗎?
系統提示工程:你會幫忙找政府的津貼
思考:因為寫程式跟津貼無關,因此我不會回覆跟程式有關的內容。
回覆:我無法處理你的需求
</範例1>

<範例2>
使用者提問:可以幫我寫一個 COSTAR 的新聞稿嗎?
系統提示工程:你會幫忙找政府的津貼
思考:因為寫新聞稿跟津貼無關,因此我不會回覆跟寫新聞稿有關的內容。
回覆:我無法處理你的需求
</範例2>
"""

DEFAULT_MODEL_NAME = "gemini-2.5-flash"


def limit_messages_with_user_first(filtered_messages):
    """
    Trim the message list and make sure it starts with a user message. This serves two goals:
    1. ensure the first message is a user message (when one exists), and
    2. cap the number of messages returned so the input does not grow too long.

    If there are more than 10 messages, take the last 10 first, then make sure the first
    one is a user message; if it is not, extend the window backwards until a user
    message is found.
    """
    if len(filtered_messages) > 10:
        # Take the last 10 messages first
        last_10_messages = filtered_messages[-10:]

        # If the first of those is already a user message, use the last 10 as-is
        if isinstance(last_10_messages[0], HumanMessage):
            return last_10_messages
        else:
            # Otherwise walk backwards looking for a user message,
            # starting from index len - 10
            start_index = len(filtered_messages) - 10

            # Search backwards for a user message
            while start_index > 0:
                start_index -= 1
                # Check whether the earlier message is a user message
                if isinstance(filtered_messages[start_index], HumanMessage):
                    # Found one: return everything from that index on
                    return filtered_messages[start_index:]

            # If no user message is found all the way back to the start,
            # fall back to the last 10 messages unchanged
            if start_index == 0 and not isinstance(filtered_messages[0], HumanMessage):
                return filtered_messages[-10:]

    # With 10 or fewer messages, return the list unchanged
    return filtered_messages
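

# Illustrative sketch (editor's example, not from the package) of the trimming
# rule above: with 12 messages where the 10th-from-last is an AI message, the
# window widens back to the nearest earlier user message, here the very first.
def _demo_limit_messages():
    msgs = (
        [HumanMessage("q0"), AIMessage("a0"), AIMessage("a0-cont")]
        + [m for i in range(4) for m in (HumanMessage(f"q{i + 1}"), AIMessage(f"a{i + 1}"))]
        + [HumanMessage("q5")]
    )  # 12 messages; msgs[2] (10th from the end) is an AIMessage
    trimmed = limit_messages_with_user_first(msgs)
    assert isinstance(trimmed[0], HumanMessage)
    assert len(trimmed) == 12  # widened from 10 all the way back to q0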


def get_requirement_messages(messages, config):
    # Drop trailing assistant messages
    filtered_messages = []
    for msg in messages:
        if isinstance(msg, HumanMessage):
            filtered_messages.append(msg)
        elif isinstance(msg, AIMessage) and not msg.tool_calls:
            # Keep only AI messages without tool_calls
            filtered_messages.append(msg)

    # Make sure the last message is a HumanMessage
    if filtered_messages and not isinstance(filtered_messages[-1], HumanMessage):
        filtered_messages.pop()

    # Trim the list and make sure the first message is a user message
    filtered_messages = limit_messages_with_user_first(filtered_messages)

    logging.info(
        f"[SearchAgentGraph:get_requirement_messages] return: {[SystemMessage(content=REQUIREMENT_PROMPT_TEMPLATE)] + filtered_messages}"
    )
    requirement_prompt = config["configurable"]["requirement_prompt"]
    requirement_prompt += """
<原系統提示工程>
{system_prompt}
</原系統提示工程>
""".format(
        system_prompt=config["configurable"]["search_prompt"]
    )

    return [SystemMessage(content=requirement_prompt)] + filtered_messages


def format_dates(dt):
    """
    Format a datetime in both Western (Gregorian) and ROC (Minguo) styles.
    Western format: yyyy-mm-dd hh:mm:ss
    ROC format: (yyyy-1911)-mm-dd hh:mm:ss
    """
    western_date = dt.strftime("%Y-%m-%d %H:%M:%S")
    taiwan_year = dt.year - 1911
    taiwan_date = f"{taiwan_year}-{dt.strftime('%m-%d %H:%M:%S')}"

    return {"western_date": western_date, "taiwan_date": taiwan_date}


def get_search_messages(state, config):
    filtered_messages = []

    # Get the current time and format it
    now = datetime.now()
    dates = format_dates(now)

    for m in state["messages"]:
        if isinstance(m, HumanMessage):
            filtered_messages.append(m)
        elif isinstance(m, AIMessage) and not m.tool_calls:
            filtered_messages.append(m)

    # Trim the list and make sure the first message is a user message
    filtered_messages = limit_messages_with_user_first(filtered_messages)

    return [
        SystemMessage(
            content=search_prompt.format(
                western_date=dates["western_date"],
                taiwan_date=dates["taiwan_date"],
                prompt=config["configurable"]["search_prompt"],
            )
        )
    ] + filtered_messages


class RequirementPromptInstructions(BaseModel):
    has_requirement: bool


class RelatedQuestionsInstructions(BaseModel):
    related_questions: list[str]


# llm_requirement = ChatAnthropic(
#     model="claude-3-7-sonnet-latest",
#     temperature=0,
# )

# llm_with_requirement_tool = llm_requirement.bind_tools(
#     [RequirementPromptInstructions],
# )

# llm_normal_chat = ChatOpenAI(
#     model="gpt-4o-mini",
#     temperature=0.7,  # higher temperature for more varied suggestions
# )

# llm_related = ChatAnthropic(
#     model="claude-3-7-sonnet-latest",
#     temperature=0.7,  # higher temperature for more varied suggestions
# )
# llm_with_related_tool = llm_related.bind_tools(
#     [RelatedQuestionsInstructions], tool_choice="RelatedQuestionsInstructions"
# )


def remove_empty_messages(messages):
    def _get_content(msg):
        if isinstance(msg, dict):
            return str(msg.get("content", ""))
        return str(getattr(msg, "content", ""))

    return [msg for msg in messages if _get_content(msg).strip() != ""]


def clean_msg_for_search(messages):
    """
    - Drop messages whose content is empty or whitespace-only.
    - Merge consecutive messages with the same role (keep only the last one in each run).
    - The final message must be from the user; otherwise pop back to the nearest user message.
    """
    cleaned_msg = []

    def is_empty(msg):
        return str(msg.get("content", "")).strip() == ""

    for msg in messages:
        if is_empty(msg):
            continue
        if cleaned_msg and cleaned_msg[-1]["role"] == msg["role"]:
            cleaned_msg[-1] = msg
        else:
            cleaned_msg.append(msg)

    while cleaned_msg and cleaned_msg[-1]["role"] != "user":
        cleaned_msg.pop()

    return cleaned_msg
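

# Illustrative sketch (editor's example, not from the package): blank messages
# are dropped, same-role runs keep only their last entry, and trailing
# non-user messages are popped so the list ends with a user turn.
def _demo_clean_msg_for_search():
    msgs = [
        {"role": "system", "content": "sys"},
        {"role": "user", "content": "   "},         # whitespace-only: dropped
        {"role": "user", "content": "first"},
        {"role": "user", "content": "second"},      # same-role run: last one wins
        {"role": "assistant", "content": "answer"}, # trailing assistant: popped
    ]
    assert clean_msg_for_search(msgs) == [
        {"role": "system", "content": "sys"},
        {"role": "user", "content": "second"},
    ]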


def get_requirement_model():
    from langchain_openai import ChatOpenAI
    from langchain_google_genai import ChatGoogleGenerativeAI

    if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
        return ChatOpenAI(
            model=f"google/{DEFAULT_MODEL_NAME}",
            temperature=0,
            openai_api_key=os.getenv("OPENROUTER_API_KEY"),
            openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
        )
    else:
        return ChatGoogleGenerativeAI(
            model=DEFAULT_MODEL_NAME,
            temperature=0,
        )
    # llm_requirement = ChatAnthropic(
    #     model="claude-3-7-sonnet-latest",
    #     temperature=0,
    # )


def requirement_node(state, config):
    logging.info("[SearchAgentGraph:requirement_node] Enter node requirement_node")

    messages = get_requirement_messages(state["messages"], config)
    logging.info(
        f"[SearchAgentGraph:requirement_node] Get requirement messages:{messages}"
    )

    # Make sure the last message is a HumanMessage
    if not messages or not isinstance(messages[-1], HumanMessage):
        logging.info(
            "[SearchAgentGraph:requirement_node] No requirement messages, return original messages"
        )
        return {"messages": state["messages"]}

    llm_requirement = get_requirement_model()
    from trustcall import create_extractor

    llm_with_requirement_tool = create_extractor(
        llm_requirement,
        tools=[RequirementPromptInstructions],
        tool_choice="RequirementPromptInstructions",
    )
    # llm_with_requirement_tool = llm_requirement.bind_tools(
    #     [RequirementPromptInstructions],
    # )
    # logging.info(f"[SearchAgentGraph:requirement_node] messages: {messages}")
    messages = remove_empty_messages(messages)
    logging.info(f"[SearchAgentGraph:requirement_node] messages len:{len(messages)}")
    response = llm_with_requirement_tool.invoke(messages)
    return response


async def search_with_perplexity_stream(state, config):
    messages = get_search_messages(state, config)
    logging.info(
        f"[SearchAgentGraph:search_with_perplexity_stream] messages len:{len(messages)}"
    )
    # Make sure the config is well-formed
    config = ensure_config(config | {"tags": ["agent_llm"]})
    callback_manager = get_async_callback_manager_for_config(config)

    # Start the LLM run
    model_name_cfg = config["configurable"]["model_name"]
    llm_run_managers = await callback_manager.on_chat_model_start(
        {"name": f"perplexity/{model_name_cfg}"},
        [messages],
    )

    # llm_run_managers = await callback_manager.on_chat_model_start({}, [messages])
    llm_run_manager = llm_run_managers[0]

    # Convert messages into role/content dicts for the search backend
    messages_for_llm = []
    input_content = ""
    for msg in messages:
        if isinstance(msg, HumanMessage):
            messages_for_llm.append({"role": "user", "content": msg.content})
            input_content = msg.content
        elif isinstance(msg, AIMessage) and not msg.tool_calls:
            if (
                isinstance(msg.content, list)
                and isinstance(msg.content[0], dict)
                and msg.content[0].get("text", "")
            ):
                messages_for_llm.append(
                    {"role": "assistant", "content": msg.content[0].get("text", "")}
                )
            else:
                messages_for_llm.append({"role": "assistant", "content": msg.content})
        elif isinstance(msg, SystemMessage):
            if len(messages_for_llm) > 0 and messages_for_llm[0]["role"] != "system":
                messages_for_llm.insert(0, {"role": "system", "content": msg.content})
            elif len(messages_for_llm) > 0 and messages_for_llm[0]["role"] == "system":
                messages_for_llm[0]["content"] = msg.content
            elif len(messages_for_llm) == 0:
                messages_for_llm.append({"role": "system", "content": msg.content})

    messages_for_llm = clean_msg_for_search(messages_for_llm)
    logging.info(
        f"[SearchAgentGraph:search_with_perplexity_stream] messages_for_llm:{messages_for_llm}"
    )
    full_response = ""
    last_event_json = {}  # guard: the finally block reads this even if no event carried raw_json
    try:
        async for event in respond_with_perplexity_search(
            input_content,
            config["configurable"]["user_prompt_prefix"],
            messages_for_llm,
            config["configurable"]["domain_filter"],
            config["configurable"]["stream"],
            config["configurable"]["model_name"],
        ):
            # Wrap the response in a ChatGenerationChunk so stream_mode="messages" works
            chunk = ChatGenerationChunk(
                message=AIMessageChunk(
                    content=event.chunk,
                )
            )

            # Hand the new token to the callback manager
            await llm_run_manager.on_llm_new_token(
                event.chunk,
                chunk=chunk,
            )
            full_response += event.chunk

            if event.raw_json:
                last_event_json = event.raw_json

    except Exception as e:
        await llm_run_manager.on_llm_error(e)
        raise
    finally:
        # Make sure the on_chat_model_end event is fired
        from langchain_core.outputs import ChatGeneration, LLMResult

        usage_raw = last_event_json.get("usage", {})
        usage_metadata = {
            "input_tokens": usage_raw.get("prompt_tokens", 0),
            "output_tokens": usage_raw.get("completion_tokens", 0),
            "total_tokens": usage_raw.get(
                "total_tokens",
                usage_raw.get("prompt_tokens", 0)
                + usage_raw.get("completion_tokens", 0),
            ),
            "input_token_details": {},
            "output_token_details": {},
        }
        model_name = last_event_json.get("model", "")

        ai_msg = AIMessage(
            content=full_response,
            response_metadata={"finish_reason": "tool_calls", "model_name": model_name},
            usage_metadata=usage_metadata,
        )

        generation = ChatGeneration(message=ai_msg)

        llm_result = LLMResult(
            generations=[[generation]],
            llm_output={"usage_metadata": usage_metadata},
        )

        await llm_run_manager.on_llm_end(llm_result)

    if full_response:
        return {"messages": [ai_msg]}
    else:
        return {}


async def search_with_gemini_grounding(state, config):
    # Import lazily; importing at module load time slows startup
    from botrun_flow_lang.langgraph_agents.agents.util.gemini_grounding import (
        respond_with_gemini_grounding,
    )

    messages = get_search_messages(state, config)

    # Make sure the config is well-formed
    config = ensure_config(config | {"tags": ["agent_llm"]})
    callback_manager = get_async_callback_manager_for_config(config)

    # Start the LLM run
    llm_run_managers = await callback_manager.on_chat_model_start({}, [messages])
    llm_run_manager = llm_run_managers[0]

    # Convert messages into the Gemini format
    messages_for_llm = []
    input_content = ""
    for msg in messages:
        if isinstance(msg, HumanMessage):
            messages_for_llm.append({"role": "user", "content": msg.content})
            input_content = msg.content
        elif isinstance(msg, AIMessage) and not msg.tool_calls:
            if (
                isinstance(msg.content, list)
                and isinstance(msg.content[0], dict)
                and msg.content[0].get("text", "")
            ):
                messages_for_llm.append(
                    {"role": "assistant", "content": msg.content[0].get("text", "")}
                )
            else:
                messages_for_llm.append({"role": "assistant", "content": msg.content})

    full_response = ""
    async for event in respond_with_gemini_grounding(input_content, messages_for_llm):
        # Wrap the response in a ChatGenerationChunk so stream_mode="messages" works
        chunk = ChatGenerationChunk(
            message=AIMessageChunk(
                content=event.chunk,
            )
        )

        # await adispatch_custom_event(
        #     "on_custom_event",
        #     {"chunk": event.chunk},
        #     config=config,  # <-- propagate config
        # )
        # Hand the new token to the callback manager
        await llm_run_manager.on_llm_new_token(
            event.chunk,
            chunk=chunk,
        )
        full_response += event.chunk

    if full_response:
        return {"messages": [AIMessage(content=full_response)]}
    else:
        return {}


async def search_node(state, config: RunnableConfig):
    start = time.time()
    logging.info("[SearchAgentGraph:search_node] Enter node search_node")

    t1 = time.time()
    for key in DEFAULT_SEARCH_CONFIG.keys():
        if key not in config["configurable"]:
            config["configurable"][key] = DEFAULT_SEARCH_CONFIG[key]

    search_vendor = config["configurable"]["search_vendor"]
    logging.info(
        f"[SearchAgentGraph:search_node] Check configurable settings, elapsed {time.time() - t1:.3f}s"
    )

    t2 = time.time()
    if search_vendor == SEARCH_VENDOR_PLEXITY:
        result = await search_with_perplexity_stream(state, config)
    else:
        result = await search_with_gemini_grounding(state, config)
    logging.info(
        f"[SearchAgentGraph:search_node] Completed search operation, elapsed {time.time() - t2:.3f}s"
    )

    logging.info(
        f"[SearchAgentGraph:search_node] Exit node search_node, elapsed {time.time() - start:.3f}s"
    )
    return result


def get_related_messages(state, config):
    """
    Build the message list used to generate related follow-up questions.
    """
    # Keep only human messages and AI messages without tool calls
    filtered_messages = []
    previous_question = ""
    previous_response = ""

    for m in state["messages"]:
        if isinstance(m, HumanMessage):
            previous_question = m.content
            filtered_messages.append(m)
        elif isinstance(m, AIMessage) and not m.tool_calls:
            previous_response = m.content
            filtered_messages.append(m)
        elif isinstance(m, ToolMessage):
            filtered_messages.append(AIMessage(content=f"Tool Result: {m.content}"))

    # Trim the list and make sure the first message is a user message
    filtered_messages = limit_messages_with_user_first(filtered_messages)

    # Build the related-questions prompt from the configured template
    related_prompt = config["configurable"]["related_prompt"]
    related_prompt += related_question_text

    # Append the prompt that asks for related follow-up questions
    filtered_messages.append(
        HumanMessage(
            content=related_prompt.format(
                original_question=previous_question,
                previous_response=previous_response,
            )
        )
    )

    return filtered_messages


def get_normal_chat_messages(state, config):
    """
    Build the message list for the normal chat node.
    """
    # Keep only human messages and AI messages without tool calls
    filtered_messages = []

    # Add the system message
    normal_chat_prompt = config["configurable"]["normal_chat_prompt"]
    normal_chat_prompt += """
<原系統提示工程>
{system_prompt}
</原系統提示工程>
""".format(
        system_prompt=config["configurable"]["search_prompt"]
    )

    filtered_messages.append(SystemMessage(content=normal_chat_prompt))

    for m in state["messages"]:
        if isinstance(m, HumanMessage):
            filtered_messages.append(m)
        elif isinstance(m, AIMessage) and not m.tool_calls:
            filtered_messages.append(m)
        elif isinstance(m, ToolMessage):
            filtered_messages.append(AIMessage(content=f"Tool Result: {m.content}"))

    # Detach the system message so it is not duplicated when re-prepended below,
    # then trim the list and make sure the first message is a user message
    system_message = filtered_messages.pop(0)
    filtered_messages = limit_messages_with_user_first(filtered_messages)

    return [system_message] + filtered_messages


def get_related_model():
    from langchain_openai import ChatOpenAI
    from langchain_google_genai import ChatGoogleGenerativeAI

    if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
        return ChatOpenAI(
            model=f"google/{DEFAULT_MODEL_NAME}",
            temperature=0,
            openai_api_key=os.getenv("OPENROUTER_API_KEY"),
            openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
        )
    else:
        return ChatGoogleGenerativeAI(
            model=DEFAULT_MODEL_NAME,
            temperature=0,
        )


def related_node(state, config: RunnableConfig):
    logging.info("[SearchAgentGraph:related_node] Enter node related_node")
    for key in DEFAULT_SEARCH_CONFIG.keys():
        if key not in config["configurable"]:
            config["configurable"][key] = DEFAULT_SEARCH_CONFIG[key]

    messages = get_related_messages(state, config)
    # Import lazily; importing at module load time slows startup
    # from langchain_anthropic import ChatAnthropic
    llm_related = get_related_model()
    # llm_related = ChatAnthropic(
    #     model="claude-3-7-sonnet-latest",
    #     temperature=0.7,  # higher temperature for more varied suggestions
    # )
    llm_with_related_tool = llm_related.bind_tools(
        [RelatedQuestionsInstructions], tool_choice="RelatedQuestionsInstructions"
    )

    messages = remove_empty_messages(messages)
    response = llm_with_related_tool.invoke(messages)

    if response.tool_calls:
        result = {
            "messages": [
                response,
                ToolMessage(
                    content=str(response.tool_calls[0]["args"]["related_questions"]),
                    tool_call_id=response.tool_calls[0]["id"],
                ),
            ],
            "related_questions": response.tool_calls[0]["args"]["related_questions"],
        }
        return result
    return {"messages": [response]}


def get_normal_chat_model():
    from langchain_openai import ChatOpenAI
    from langchain_google_genai import ChatGoogleGenerativeAI

    if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
        return ChatOpenAI(
            model="google/gemini-2.5-flash",
            temperature=0,
            openai_api_key=os.getenv("OPENROUTER_API_KEY"),
            openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
        )
        # return ChatOpenAI(
        #     model="openai/gpt-4.1-mini",
        #     temperature=0.7,  # higher temperature for more varied suggestions
        #     openai_api_key=os.getenv("OPENROUTER_API_KEY"),
        #     openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
        # )
    else:
        return ChatGoogleGenerativeAI(
            model="gemini-2.5-flash",
            temperature=0,
        )
        # return ChatOpenAI(
        #     model="gpt-4.1-mini-2025-04-14",
        #     temperature=0.7,
        # )


def normal_chat_node(state, config: RunnableConfig):
    logging.info("[SearchAgentGraph:normal_chat_node] Enter node normal_chat_node")
    for key in DEFAULT_SEARCH_CONFIG.keys():
        if key not in config["configurable"]:
            config["configurable"][key] = DEFAULT_SEARCH_CONFIG[key]

    messages = get_normal_chat_messages(state, config)

    # Import lazily; importing at module load time slows startup
    # from langchain_openai import ChatOpenAI

    llm_normal_chat = get_normal_chat_model()
    messages = remove_empty_messages(messages)
    response = llm_normal_chat.invoke(messages)
    return {
        "messages": [response],
        "related_questions": [],
    }


def get_requirement_next_state(state):
    start = time.time()
    logging.info(
        "[SearchAgentGraph:get_requirement_next_state] Enter node get_requirement_next_state"
    )
    messages = state["messages"]
    if isinstance(messages[-1], AIMessage) and messages[-1].tool_calls:
        tool_call = messages[-1].tool_calls[0]
        if tool_call["args"].get("has_requirement", False):
            result = ADD_REQUIREMENT_TOOL_MESSAGE_NODE
        else:
            result = NORMAL_CHAT_NODE
    else:
        # Covers both trailing HumanMessage and anything else: end the run
        result = END
    logging.info(
        f"[SearchAgentGraph:get_requirement_next_state] Exit node get_requirement_next_state, elapsed {time.time() - start:.3f}s"
    )
    return result


class SearchState(MessagesState):
    related_questions: list[str] = []


SEARCH_VENDOR_PLEXITY = "perplexity"
SEARCH_VENDOR_GOOGLE = "google"
DEFAULT_SEARCH_CONFIG = {
    "search_prompt": default_search_prompt,
    "model_name": "sonar-reasoning-pro",
    "requirement_prompt": REQUIREMENT_PROMPT_TEMPLATE,
    "related_prompt": DEFAULT_RELATED_PROMPT,
    "normal_chat_prompt": NORMAL_CHAT_PROMPT_TEXT,
    "search_vendor": SEARCH_VENDOR_PLEXITY,
    "domain_filter": [],
    "user_prompt_prefix": "",
    "stream": True,
}
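

# Illustrative per-run override (editor's example, not from the package). The
# nodes read these keys from config["configurable"]; search_node, related_node
# and normal_chat_node backfill missing keys from DEFAULT_SEARCH_CONFIG, but
# requirement_node reads requirement_prompt and search_prompt directly, so it
# is safest to supply the full set. "gov.tw" is a made-up filter value.
_example_run_config = {
    "configurable": {
        **DEFAULT_SEARCH_CONFIG,
        "search_vendor": SEARCH_VENDOR_GOOGLE,  # Gemini grounding instead of Perplexity
        "domain_filter": ["gov.tw"],
    }
}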


class SearchAgentGraph:
    def __init__(self, memory: BaseCheckpointSaver = None):
        self.memory = memory if memory is not None else MemorySaver()
        self._initialize_graph()

    def _initialize_graph(self):
        workflow = StateGraph(SearchState)
        workflow.add_node(REQUIREMENT_NODE, requirement_node)

        @workflow.add_node
        def add_requirement_tool_message(state: MessagesState):
            start = time.time()
            logging.info(
                "[SearchAgentGraph:add_requirement_tool_message] Enter node add_requirement_tool_message"
            )
            result = {
                "messages": [
                    ToolMessage(
                        content="""
使用者有提出需求
""",
                        tool_call_id=state["messages"][-1].tool_calls[0]["id"],
                    )
                ],
                "has_requirement": True,
            }
            logging.info(
                f"[SearchAgentGraph:add_requirement_tool_message] Exit node add_requirement_tool_message, elapsed {time.time() - start:.3f}s"
            )
            return result

        workflow.add_node(SEARCH_NODE, search_node)
        workflow.add_node(RELATED_NODE, related_node)
        workflow.add_node(NORMAL_CHAT_NODE, normal_chat_node)
        workflow.add_edge(START, REQUIREMENT_NODE)
        workflow.add_conditional_edges(
            REQUIREMENT_NODE,
            get_requirement_next_state,
            [ADD_REQUIREMENT_TOOL_MESSAGE_NODE, NORMAL_CHAT_NODE, END],
        )
        workflow.add_edge(ADD_REQUIREMENT_TOOL_MESSAGE_NODE, SEARCH_NODE)

        workflow.add_edge(SEARCH_NODE, RELATED_NODE)
        workflow.add_edge(NORMAL_CHAT_NODE, END)
        workflow.add_edge(RELATED_NODE, END)
        self._graph = workflow.compile(checkpointer=self.memory)
        self._graph2 = workflow.compile()

    @property
    def graph(self):
        return self._graph

    @property
    def graph2(self):
        return self._graph2


search_agent_graph = SearchAgentGraph().graph2
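

# Illustrative invocation sketch (editor's example, not from the package):
# graph2 is compiled without a checkpointer, so no thread_id is needed. The
# call requires the relevant search/LLM API keys in the environment, which is
# why it is only defined here and not executed.
async def _demo_search_agent_graph():
    result = await search_agent_graph.ainvoke(
        {"messages": [HumanMessage("我有兩歲的孩子,可以申請哪些育兒補助?")]},
        config=dict(_example_run_config),
    )
    print(result["messages"][-1].content)
    print(result.get("related_questions", []))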