botrun-flow-lang 5.12.263__py3-none-any.whl → 6.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +508 -508
  6. botrun_flow_lang/api/langgraph_api.py +816 -811
  7. botrun_flow_lang/api/langgraph_constants.py +11 -0
  8. botrun_flow_lang/api/line_bot_api.py +1484 -1484
  9. botrun_flow_lang/api/model_api.py +300 -300
  10. botrun_flow_lang/api/rate_limit_api.py +32 -32
  11. botrun_flow_lang/api/routes.py +79 -79
  12. botrun_flow_lang/api/search_api.py +53 -53
  13. botrun_flow_lang/api/storage_api.py +395 -395
  14. botrun_flow_lang/api/subsidy_api.py +290 -290
  15. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  16. botrun_flow_lang/api/user_setting_api.py +70 -70
  17. botrun_flow_lang/api/version_api.py +31 -31
  18. botrun_flow_lang/api/youtube_api.py +26 -26
  19. botrun_flow_lang/constants.py +13 -13
  20. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
  21. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  22. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
  25. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  26. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  27. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +730 -723
  28. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  29. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  30. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  31. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  32. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  33. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +336 -294
  34. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
  35. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  36. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  37. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +562 -486
  38. botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
  39. botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
  40. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  41. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  42. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  43. botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py +34 -0
  44. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  45. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  46. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  47. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  48. botrun_flow_lang/log/.gitignore +2 -2
  49. botrun_flow_lang/main.py +61 -61
  50. botrun_flow_lang/main_fast.py +51 -51
  51. botrun_flow_lang/mcp_server/__init__.py +10 -10
  52. botrun_flow_lang/mcp_server/default_mcp.py +854 -744
  53. botrun_flow_lang/models/nodes/utils.py +205 -205
  54. botrun_flow_lang/models/token_usage.py +34 -34
  55. botrun_flow_lang/requirements.txt +21 -21
  56. botrun_flow_lang/services/base/firestore_base.py +30 -30
  57. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  58. botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
  59. botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
  60. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  61. botrun_flow_lang/services/storage/storage_store.py +65 -65
  62. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  63. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  64. botrun_flow_lang/static/docs/tools/index.html +926 -926
  65. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  66. botrun_flow_lang/tests/api_stress_test.py +357 -357
  67. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  68. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  69. botrun_flow_lang/tests/test_html_util.py +31 -31
  70. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  71. botrun_flow_lang/tests/test_img_util.py +39 -39
  72. botrun_flow_lang/tests/test_local_files.py +114 -114
  73. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  74. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  75. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  76. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  77. botrun_flow_lang/tools/generate_docs.py +133 -133
  78. botrun_flow_lang/tools/templates/tools.html +153 -153
  79. botrun_flow_lang/utils/__init__.py +7 -7
  80. botrun_flow_lang/utils/botrun_logger.py +344 -344
  81. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  82. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  83. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  84. botrun_flow_lang/utils/langchain_utils.py +324 -324
  85. botrun_flow_lang/utils/yaml_utils.py +9 -9
  86. {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/METADATA +6 -6
  87. botrun_flow_lang-6.2.21.dist-info/RECORD +104 -0
  88. botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
  89. {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/agent_runner.py
@@ -1,178 +1,178 @@
from typing import AsyncGenerator, Dict, List, Optional, Union, Any
import logging

from pydantic import BaseModel


class StepsUpdateEvent(BaseModel):
    """
    for step in steps:
        print("Description:", step.get("description", ""))
        print("Status:", step.get("status", ""))
        print("Updates:", step.get("updates", ""))
    """

    steps: List = []


class OnNodeStreamEvent(BaseModel):
    chunk: str


class ChatModelEndEvent(BaseModel):
    """
    Data model for the chat-model end event.
    Exposes the raw data of the on_chat_model_end event so the caller can process it as needed.
    """

    # Raw event data
    raw_output: Any = None  # from event["data"]["output"]
    raw_input: Any = None  # from event["data"]["input"]

    # Basic metadata
    langgraph_node: str = ""
    usage_metadata: Dict = {}
    model_name: str = ""

    # chunk
    chunk: str = ""


MAX_RECURSION_LIMIT = 25


# graph is a CompiledStateGraph; it is left unannotated because importing the type costs ~0.5 s at load time
async def langgraph_runner(
    thread_id: str,
    init_state: dict,
    graph,
    need_resume: bool = False,
    extra_config: Optional[Dict] = None,
) -> AsyncGenerator:
    """
    The difference from agent_runner: langgraph_runner yields LangGraph's events verbatim,
    while agent_runner yields LangGraph's events after post-processing them.
    """
    invoke_state = init_state
    config = {
        "configurable": {"thread_id": thread_id},
        "recursion_limit": MAX_RECURSION_LIMIT,
    }
    if extra_config:
        config["configurable"].update(extra_config)
    if need_resume:
        state_history = []
        async for state in graph.aget_state_history(config):
            state_history.append(state)

        # If state_history is longer than MAX_RECURSION_LIMIT, raise recursion_limit dynamically
        if len(state_history) > MAX_RECURSION_LIMIT:
            # How many times the limit has been exceeded
            multiplier = (len(state_history) - 1) // MAX_RECURSION_LIMIT
            # Set the new recursion_limit to (multiplier + 1) * MAX_RECURSION_LIMIT
            config["recursion_limit"] = (multiplier + 1) * MAX_RECURSION_LIMIT

    try:
        async for event in graph.astream_events(
            invoke_state,
            config,
            version="v2",
        ):
            yield event
    except Exception as e:
        # Catch SSE stream read errors (e.g. httpcore.ReadError)
        logging.error(f"Error reading SSE stream: {e}", exc_info=True)
        # Yield an error event so the caller is informed
        yield {"error": f"SSE stream error: {str(e)}"}


# graph is a CompiledStateGraph; it is left unannotated because importing the type costs ~0.5 s at load time
async def agent_runner(
    thread_id: str,
    init_state: dict,
    graph,
    need_resume: bool = False,
    extra_config: Optional[Dict] = None,
) -> AsyncGenerator[
    Union[StepsUpdateEvent, OnNodeStreamEvent, ChatModelEndEvent], None
]:
    invoke_state = init_state
    config = {
        "configurable": {"thread_id": thread_id},
        "recursion_limit": MAX_RECURSION_LIMIT,
    }
    if extra_config:
        config["configurable"].update(extra_config)
    if need_resume:
        state_history = []
        async for state in graph.aget_state_history(config):
            state_history.append(state)

        # If state_history is longer than MAX_RECURSION_LIMIT, raise recursion_limit dynamically
        if len(state_history) > MAX_RECURSION_LIMIT:
            # How many times the limit has been exceeded
            multiplier = (len(state_history) - 1) // MAX_RECURSION_LIMIT
            # Set the new recursion_limit to (multiplier + 1) * MAX_RECURSION_LIMIT
            config["recursion_limit"] = (multiplier + 1) * MAX_RECURSION_LIMIT

    async for event in graph.astream_events(
        invoke_state,
        config,
        version="v2",
    ):
        if event["event"] == "on_chain_end":
            pass
        if event["event"] == "on_chat_model_end":
            data = event.get("data", {})
            logging.info(f"[Agent Runner] on_chat_model_end data: {data}")
            metadata = event.get("metadata", {})
            langgraph_node = metadata.get("langgraph_node", "")
            output = data.get("output", {})

            # Extract the commonly used metadata (kept for backward compatibility)
            usage_metadata = {}
            model_name = ""

            if hasattr(output, "usage_metadata"):
                usage_metadata = output.usage_metadata if output.usage_metadata else {}

            if hasattr(output, "response_metadata"):
                model_name = output.response_metadata.get("model_name", "")

            chat_model_end_event = ChatModelEndEvent(
                raw_output=data.get("output", {}),
                raw_input=data.get("input", {}),
                langgraph_node=langgraph_node,
                usage_metadata=usage_metadata,
                model_name=model_name,
            )
            yield chat_model_end_event
        if event["event"] == "on_chat_model_stream":
            data = event["data"]
            if (
                data["chunk"].content
                and isinstance(data["chunk"].content[0], dict)
                and data["chunk"].content[0].get("text", "")
            ):
                yield OnNodeStreamEvent(chunk=data["chunk"].content[0].get("text", ""))
            elif data["chunk"].content and isinstance(data["chunk"].content, str):
                yield OnNodeStreamEvent(chunk=data["chunk"].content)


# def handle_copilotkit_intermediate_state(event: dict):
#     print("Handling copilotkit intermediate state")
#     copilotkit_intermediate_state = event["metadata"].get(
#         "copilotkit:emit-intermediate-state"
#     )
#     print(f"Intermediate state: {copilotkit_intermediate_state}")
#     if copilotkit_intermediate_state:
#         for intermediate_state in copilotkit_intermediate_state:
#             if intermediate_state.get("state_key", "") == "steps":
#                 for tool_call in event["data"]["output"].tool_calls:
#                     if tool_call.get("name", "") == intermediate_state.get("tool", ""):
#                         steps = tool_call["args"].get(
#                             intermediate_state.get("tool_argument")
#                         )
#                         print(f"Yielding steps: {steps}")
#                         yield StepsUpdateEvent(steps=steps)
#     print("--------------------------------")
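Both runners resize recursion_limit the same way when resuming: for a state history of length n > 25, the limit becomes ((n - 1) // 25 + 1) * 25, the smallest multiple of 25 strictly greater than n - 1. A standalone sketch of that arithmetic for checking (the helper name is ours, not part of the package API):

MAX_RECURSION_LIMIT = 25

def adjusted_recursion_limit(history_len: int) -> int:
    # Mirrors the resume branch above: keep the default for short histories,
    # otherwise round up to the next multiple of the base limit.
    if history_len <= MAX_RECURSION_LIMIT:
        return MAX_RECURSION_LIMIT
    multiplier = (history_len - 1) // MAX_RECURSION_LIMIT
    return (multiplier + 1) * MAX_RECURSION_LIMIT

assert adjusted_recursion_limit(10) == 25   # short history: default stands
assert adjusted_recursion_limit(26) == 50   # 25 // 25 = 1  ->  2 * 25
assert adjusted_recursion_limit(60) == 75   # 59 // 25 = 2  ->  3 * 25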
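For orientation, a minimal consumption sketch for agent_runner. The import path follows the file list above; print_run, the graph argument, and the {"messages": []} state shape are illustrative assumptions, not package API:

from botrun_flow_lang.langgraph_agents.agents.agent_runner import (
    ChatModelEndEvent,
    OnNodeStreamEvent,
    StepsUpdateEvent,
    agent_runner,
)

async def print_run(graph, thread_id: str, init_state: dict) -> None:
    # Stream one run: print text chunks as they arrive, plus a usage summary
    # whenever a chat-model call finishes.
    async for event in agent_runner(thread_id, init_state, graph):
        if isinstance(event, OnNodeStreamEvent):
            print(event.chunk, end="", flush=True)
        elif isinstance(event, ChatModelEndEvent):
            print(f"\n[model={event.model_name}] usage={event.usage_metadata}")
        elif isinstance(event, StepsUpdateEvent):
            for step in event.steps:
                print("step:", step.get("description", ""))

# With some compiled graph in hand (hypothetical here):
# import asyncio; asyncio.run(print_run(my_graph, "thread-1", {"messages": []}))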
botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py
@@ -1,77 +1,77 @@
from langchain_core.tools import tool
import litellm
import json

SYSTEM_PROMPT = """You are a professional research-planning assistant. Your job is to:
1. Analyze the user's research needs
2. Plan the complete set of research steps
3. If the user specifies particular steps, plan how to carry them out

You must reply strictly in the following JSON format, with no other text:
{
    "analysis": "analysis of the user's needs",
    "steps": [
        {
            "step": "step 1",
            "description": "detailed explanation",
            "expected_outcome": "expected outcome"
        }
    ]
}"""

JSON_SCHEMA = {
    "type": "object",
    "properties": {
        "analysis": {"type": "string", "description": "analysis of the user's needs"},
        "steps": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "step": {"type": "string"},
                    "description": {"type": "string"},
                    "expected_outcome": {"type": "string"},
                },
                "required": ["step", "description", "expected_outcome"],
            },
        },
    },
    "required": ["analysis", "steps"],
}


@tool
def step_planner(user_input: str) -> str:
    """
    Research-planning tool - plans research steps and an execution plan

    This tool will:
    1. Analyze the user's research needs
    2. Plan the complete research steps around the available tools
    3. Handle execution steps the user has specified

    Args:
        user_input (str): The user's research needs or specified steps

    Returns:
        str: A research plan in JSON format, containing the analysis and detailed steps
    """

    print("step_planner user_input============>", user_input)
    response = litellm.completion(
        model="o3-mini",
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_input},
        ],
        response_format={"type": "json_object", "schema": JSON_SCHEMA},
        reasoning_effort="high",
    )

    try:
        # Make sure the response is valid JSON
        plan = json.loads(response.choices[0].message.content)
        return json.dumps(plan, ensure_ascii=False, indent=2)
    except json.JSONDecodeError:
        # If the JSON cannot be parsed, return the raw response as-is
        return response.choices[0].message.content
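A short usage sketch, assuming the standard langchain_core tool interface (single-input tools accept a plain string via .invoke); the topic string is illustrative:

import json

from botrun_flow_lang.langgraph_agents.agents.agent_tools.step_planner import step_planner

raw = step_planner.invoke("Survey recent work on retrieval-augmented generation")
try:
    plan = json.loads(raw)  # expected shape: {"analysis": ..., "steps": [...]}
    for step in plan["steps"]:
        print(step["step"], "->", step["expected_outcome"])
except json.JSONDecodeError:
    print(raw)  # step_planner falls back to the raw model text when parsing fails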
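Because provider-side enforcement of response_format schemas varies, a caller may want an explicit check against the module's own JSON_SCHEMA; a sketch using the third-party jsonschema package (this validation step is our addition, not something step_planner does itself):

import json

from jsonschema import ValidationError, validate

from botrun_flow_lang.langgraph_agents.agents.agent_tools.step_planner import (
    JSON_SCHEMA,
    step_planner,
)

raw = step_planner.invoke("Plan a literature review on LLM agent evaluation")
try:
    plan = json.loads(raw)
    validate(instance=plan, schema=JSON_SCHEMA)  # raises ValidationError on mismatch
    print("plan OK:", [s["step"] for s in plan["steps"]])
except (json.JSONDecodeError, ValidationError) as exc:
    print(f"plan did not match the expected schema: {exc}")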