botrun-flow-lang 5.9.301__py3-none-any.whl → 5.10.82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +481 -481
  6. botrun_flow_lang/api/langgraph_api.py +796 -796
  7. botrun_flow_lang/api/line_bot_api.py +1357 -1357
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +316 -316
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  25. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +548 -542
  26. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  27. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  28. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  29. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  30. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  31. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  32. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
  33. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  34. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  35. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
  36. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  37. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  38. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  39. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  40. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  41. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  42. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  43. botrun_flow_lang/log/.gitignore +2 -2
  44. botrun_flow_lang/main.py +61 -61
  45. botrun_flow_lang/main_fast.py +51 -51
  46. botrun_flow_lang/mcp_server/__init__.py +10 -10
  47. botrun_flow_lang/mcp_server/default_mcp.py +711 -711
  48. botrun_flow_lang/models/nodes/utils.py +205 -205
  49. botrun_flow_lang/models/token_usage.py +34 -34
  50. botrun_flow_lang/requirements.txt +21 -21
  51. botrun_flow_lang/services/base/firestore_base.py +30 -30
  52. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  53. botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
  54. botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
  55. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  56. botrun_flow_lang/services/storage/storage_store.py +65 -65
  57. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  58. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  59. botrun_flow_lang/static/docs/tools/index.html +926 -926
  60. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  61. botrun_flow_lang/tests/api_stress_test.py +357 -357
  62. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  63. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  64. botrun_flow_lang/tests/test_html_util.py +31 -31
  65. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  66. botrun_flow_lang/tests/test_img_util.py +39 -39
  67. botrun_flow_lang/tests/test_local_files.py +114 -114
  68. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  69. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  70. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  71. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  72. botrun_flow_lang/tools/generate_docs.py +133 -133
  73. botrun_flow_lang/tools/templates/tools.html +153 -153
  74. botrun_flow_lang/utils/__init__.py +7 -7
  75. botrun_flow_lang/utils/botrun_logger.py +344 -344
  76. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  77. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  78. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  79. botrun_flow_lang/utils/langchain_utils.py +324 -324
  80. botrun_flow_lang/utils/yaml_utils.py +9 -9
  81. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/METADATA +2 -2
  82. botrun_flow_lang-5.10.82.dist-info/RECORD +99 -0
  83. botrun_flow_lang-5.9.301.dist-info/RECORD +0 -99
  84. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/agent_runner.py
@@ -1,174 +1,174 @@
- from typing import AsyncGenerator, Dict, List, Optional, Union, Any
- import logging
-
- from pydantic import BaseModel
-
-
- class StepsUpdateEvent(BaseModel):
-     """
-     Carries the current plan steps; each step is a dict consumed like:
-
-     for step in steps:
-         print("Description:", step.get("description", ""))
-         print("Status:", step.get("status", ""))
-         print("Updates:", step.get("updates", ""))
-     """
-
-     steps: List = []
-
-
- class OnNodeStreamEvent(BaseModel):
-     chunk: str
-
-
- class ChatModelEndEvent(BaseModel):
-     """
-     Data model for the chat-model-end event.
-     Exposes the raw data of the on_chat_model_end event so the caller can
-     process it however it needs.
-     """
-
-     # Raw event data
-     raw_output: Any = None  # from event["data"]["output"]
-     raw_input: Any = None  # from event["data"]["input"]
-
-     # Basic metadata
-     langgraph_node: str = ""
-     usage_metadata: Dict = {}
-     model_name: str = ""
-
-     # chunk
-     chunk: str = ""
-
-
- MAX_RECURSION_LIMIT = 25
-
-
- # graph is a CompiledStateGraph; the type hint is omitted because importing it
- # adds ~0.5 s of load time
- async def langgraph_runner(
-     thread_id: str,
-     init_state: dict,
-     graph,
-     need_resume: bool = False,
-     extra_config: Optional[Dict] = None,
- ) -> AsyncGenerator:
-     """
-     The difference from agent_runner: langgraph_runner yields LangGraph's
-     events untouched, while agent_runner yields them after post-processing.
-     """
-     invoke_state = init_state
-     config = {
-         "configurable": {"thread_id": thread_id},
-         "recursion_limit": MAX_RECURSION_LIMIT,
-     }
-     if extra_config:
-         config["configurable"].update(extra_config)
-     if need_resume:
-         state_history = []
-         async for state in graph.aget_state_history(config):
-             state_history.append(state)
-
-         # If state_history is longer than MAX_RECURSION_LIMIT, raise the
-         # recursion_limit dynamically
-         if len(state_history) > MAX_RECURSION_LIMIT:
-             # How many whole multiples of the limit have been used up
-             multiplier = (len(state_history) - 1) // MAX_RECURSION_LIMIT
-             # Set the new recursion_limit to (multiplier + 1) * MAX_RECURSION_LIMIT
-             config["recursion_limit"] = (multiplier + 1) * MAX_RECURSION_LIMIT
-
-     async for event in graph.astream_events(
-         invoke_state,
-         config,
-         version="v2",
-     ):
-         # state = await graph.aget_state(config)
-         # print(state.config)
-
-         yield event
-
-
- # graph is a CompiledStateGraph; the type hint is omitted because importing it
- # adds ~0.5 s of load time
- async def agent_runner(
-     thread_id: str,
-     init_state: dict,
-     graph,
-     need_resume: bool = False,
-     extra_config: Optional[Dict] = None,
- ) -> AsyncGenerator[
-     Union[StepsUpdateEvent, OnNodeStreamEvent, ChatModelEndEvent], None
- ]:
-     invoke_state = init_state
-     config = {
-         "configurable": {"thread_id": thread_id},
-         "recursion_limit": MAX_RECURSION_LIMIT,
-     }
-     if extra_config:
-         config["configurable"].update(extra_config)
-     if need_resume:
-         state_history = []
-         async for state in graph.aget_state_history(config):
-             state_history.append(state)
-
-         # If state_history is longer than MAX_RECURSION_LIMIT, raise the
-         # recursion_limit dynamically
-         if len(state_history) > MAX_RECURSION_LIMIT:
-             # How many whole multiples of the limit have been used up
-             multiplier = (len(state_history) - 1) // MAX_RECURSION_LIMIT
-             # Set the new recursion_limit to (multiplier + 1) * MAX_RECURSION_LIMIT
-             config["recursion_limit"] = (multiplier + 1) * MAX_RECURSION_LIMIT
-
-     async for event in graph.astream_events(
-         invoke_state,
-         config,
-         version="v2",
-     ):
-         if event["event"] == "on_chain_end":
-             pass
-         if event["event"] == "on_chat_model_end":
-             data = event.get("data", {})
-             logging.info(f"[Agent Runner] on_chat_model_end data: {data}")
-             metadata = event.get("metadata", {})
-             langgraph_node = metadata.get("langgraph_node", "")
-             output = data.get("output", {})
-
-             # Extract commonly used metadata (kept for backward compatibility)
-             usage_metadata = {}
-             model_name = ""
-
-             if hasattr(output, "usage_metadata"):
-                 usage_metadata = output.usage_metadata if output.usage_metadata else {}
-
-             if hasattr(output, "response_metadata"):
-                 model_name = output.response_metadata.get("model_name", "")
-
-             chat_model_end_event = ChatModelEndEvent(
-                 raw_output=data.get("output", {}),
-                 raw_input=data.get("input", {}),
-                 langgraph_node=langgraph_node,
-                 usage_metadata=usage_metadata,
-                 model_name=model_name,
-             )
-             yield chat_model_end_event
-         if event["event"] == "on_chat_model_stream":
-             data = event["data"]
-             if (
-                 data["chunk"].content
-                 and isinstance(data["chunk"].content[0], dict)
-                 and data["chunk"].content[0].get("text", "")
-             ):
-                 yield OnNodeStreamEvent(chunk=data["chunk"].content[0].get("text", ""))
-             elif data["chunk"].content and isinstance(data["chunk"].content, str):
-                 yield OnNodeStreamEvent(chunk=data["chunk"].content)
-
-
- # def handle_copilotkit_intermediate_state(event: dict):
- #     print("Handling copilotkit intermediate state")
- #     copilotkit_intermediate_state = event["metadata"].get(
- #         "copilotkit:emit-intermediate-state"
- #     )
- #     print(f"Intermediate state: {copilotkit_intermediate_state}")
- #     if copilotkit_intermediate_state:
- #         for intermediate_state in copilotkit_intermediate_state:
- #             if intermediate_state.get("state_key", "") == "steps":
- #                 for tool_call in event["data"]["output"].tool_calls:
- #                     if tool_call.get("name", "") == intermediate_state.get("tool", ""):
- #                         steps = tool_call["args"].get(
- #                             intermediate_state.get("tool_argument")
- #                         )
- #                         print(f"Yielding steps: {steps}")
- #                         yield StepsUpdateEvent(steps=steps)
- #     print("--------------------------------")
+ from typing import AsyncGenerator, Dict, List, Optional, Union, Any
+ import logging
+
+ from pydantic import BaseModel
+
+
+ class StepsUpdateEvent(BaseModel):
+     """
+     Carries the current plan steps; each step is a dict consumed like:
+
+     for step in steps:
+         print("Description:", step.get("description", ""))
+         print("Status:", step.get("status", ""))
+         print("Updates:", step.get("updates", ""))
+     """
+
+     steps: List = []
+
+
+ class OnNodeStreamEvent(BaseModel):
+     chunk: str
+
+
+ class ChatModelEndEvent(BaseModel):
+     """
+     Data model for the chat-model-end event.
+     Exposes the raw data of the on_chat_model_end event so the caller can
+     process it however it needs.
+     """
+
+     # Raw event data
+     raw_output: Any = None  # from event["data"]["output"]
+     raw_input: Any = None  # from event["data"]["input"]
+
+     # Basic metadata
+     langgraph_node: str = ""
+     usage_metadata: Dict = {}
+     model_name: str = ""
+
+     # chunk
+     chunk: str = ""
+
+
+ MAX_RECURSION_LIMIT = 25
+
+
+ # graph is a CompiledStateGraph; the type hint is omitted because importing it
+ # adds ~0.5 s of load time
+ async def langgraph_runner(
+     thread_id: str,
+     init_state: dict,
+     graph,
+     need_resume: bool = False,
+     extra_config: Optional[Dict] = None,
+ ) -> AsyncGenerator:
+     """
+     The difference from agent_runner: langgraph_runner yields LangGraph's
+     events untouched, while agent_runner yields them after post-processing.
+     """
+     invoke_state = init_state
+     config = {
+         "configurable": {"thread_id": thread_id},
+         "recursion_limit": MAX_RECURSION_LIMIT,
+     }
+     if extra_config:
+         config["configurable"].update(extra_config)
+     if need_resume:
+         state_history = []
+         async for state in graph.aget_state_history(config):
+             state_history.append(state)
+
+         # If state_history is longer than MAX_RECURSION_LIMIT, raise the
+         # recursion_limit dynamically
+         if len(state_history) > MAX_RECURSION_LIMIT:
+             # How many whole multiples of the limit have been used up
+             multiplier = (len(state_history) - 1) // MAX_RECURSION_LIMIT
+             # Set the new recursion_limit to (multiplier + 1) * MAX_RECURSION_LIMIT
+             config["recursion_limit"] = (multiplier + 1) * MAX_RECURSION_LIMIT
+
+     async for event in graph.astream_events(
+         invoke_state,
+         config,
+         version="v2",
+     ):
+         # state = await graph.aget_state(config)
+         # print(state.config)
+
+         yield event
+
+
+ # graph is a CompiledStateGraph; the type hint is omitted because importing it
+ # adds ~0.5 s of load time
+ async def agent_runner(
+     thread_id: str,
+     init_state: dict,
+     graph,
+     need_resume: bool = False,
+     extra_config: Optional[Dict] = None,
+ ) -> AsyncGenerator[
+     Union[StepsUpdateEvent, OnNodeStreamEvent, ChatModelEndEvent], None
+ ]:
+     invoke_state = init_state
+     config = {
+         "configurable": {"thread_id": thread_id},
+         "recursion_limit": MAX_RECURSION_LIMIT,
+     }
+     if extra_config:
+         config["configurable"].update(extra_config)
+     if need_resume:
+         state_history = []
+         async for state in graph.aget_state_history(config):
+             state_history.append(state)
+
+         # If state_history is longer than MAX_RECURSION_LIMIT, raise the
+         # recursion_limit dynamically
+         if len(state_history) > MAX_RECURSION_LIMIT:
+             # How many whole multiples of the limit have been used up
+             multiplier = (len(state_history) - 1) // MAX_RECURSION_LIMIT
+             # Set the new recursion_limit to (multiplier + 1) * MAX_RECURSION_LIMIT
+             config["recursion_limit"] = (multiplier + 1) * MAX_RECURSION_LIMIT
+
+     async for event in graph.astream_events(
+         invoke_state,
+         config,
+         version="v2",
+     ):
+         if event["event"] == "on_chain_end":
+             pass
+         if event["event"] == "on_chat_model_end":
+             data = event.get("data", {})
+             logging.info(f"[Agent Runner] on_chat_model_end data: {data}")
+             metadata = event.get("metadata", {})
+             langgraph_node = metadata.get("langgraph_node", "")
+             output = data.get("output", {})
+
+             # Extract commonly used metadata (kept for backward compatibility)
+             usage_metadata = {}
+             model_name = ""
+
+             if hasattr(output, "usage_metadata"):
+                 usage_metadata = output.usage_metadata if output.usage_metadata else {}
+
+             if hasattr(output, "response_metadata"):
+                 model_name = output.response_metadata.get("model_name", "")
+
+             chat_model_end_event = ChatModelEndEvent(
+                 raw_output=data.get("output", {}),
+                 raw_input=data.get("input", {}),
+                 langgraph_node=langgraph_node,
+                 usage_metadata=usage_metadata,
+                 model_name=model_name,
+             )
+             yield chat_model_end_event
+         if event["event"] == "on_chat_model_stream":
+             data = event["data"]
+             if (
+                 data["chunk"].content
+                 and isinstance(data["chunk"].content[0], dict)
+                 and data["chunk"].content[0].get("text", "")
+             ):
+                 yield OnNodeStreamEvent(chunk=data["chunk"].content[0].get("text", ""))
+             elif data["chunk"].content and isinstance(data["chunk"].content, str):
+                 yield OnNodeStreamEvent(chunk=data["chunk"].content)
+
+
+ # def handle_copilotkit_intermediate_state(event: dict):
+ #     print("Handling copilotkit intermediate state")
+ #     copilotkit_intermediate_state = event["metadata"].get(
+ #         "copilotkit:emit-intermediate-state"
+ #     )
+ #     print(f"Intermediate state: {copilotkit_intermediate_state}")
+ #     if copilotkit_intermediate_state:
+ #         for intermediate_state in copilotkit_intermediate_state:
+ #             if intermediate_state.get("state_key", "") == "steps":
+ #                 for tool_call in event["data"]["output"].tool_calls:
+ #                     if tool_call.get("name", "") == intermediate_state.get("tool", ""):
+ #                         steps = tool_call["args"].get(
+ #                             intermediate_state.get("tool_argument")
+ #                         )
+ #                         print(f"Yielding steps: {steps}")
+ #                         yield StepsUpdateEvent(steps=steps)
+ #     print("--------------------------------")
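For orientation: agent_runner condenses LangGraph's raw astream_events stream into typed events. OnNodeStreamEvent carries streamed text chunks and ChatModelEndEvent carries the raw output and usage metadata of each finished model call; StepsUpdateEvent is only produced by the commented-out CopilotKit path above. Note the resume arithmetic: with, say, 60 entries of state history, multiplier = 59 // 25 = 2, so the recursion limit is raised to 3 * 25 = 75. Below is a minimal consumption sketch; the one-node echo graph and the thread id are illustrative stand-ins (any compiled LangGraph graph works), and stream chunks only appear once a node actually calls a chat model:

import asyncio

from langgraph.graph import END, START, MessagesState, StateGraph

from botrun_flow_lang.langgraph_agents.agents.agent_runner import (
    ChatModelEndEvent,
    OnNodeStreamEvent,
    agent_runner,
)


def echo(state: MessagesState) -> dict:
    # Stand-in node; in practice this would invoke a chat model.
    return {"messages": [("assistant", "hi")]}


builder = StateGraph(MessagesState)
builder.add_node("echo", echo)
builder.add_edge(START, "echo")
builder.add_edge("echo", END)
graph = builder.compile()


async def main() -> None:
    async for event in agent_runner(
        thread_id="demo-thread",  # illustrative id
        init_state={"messages": [("user", "hello")]},
        graph=graph,
    ):
        if isinstance(event, OnNodeStreamEvent):
            print(event.chunk, end="", flush=True)  # streamed model text
        elif isinstance(event, ChatModelEndEvent):
            print("\n[model]", event.model_name, event.usage_metadata)


asyncio.run(main())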
botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py
@@ -1,77 +1,77 @@
- from langchain_core.tools import tool
- import litellm
- import json
-
- SYSTEM_PROMPT = """You are a professional research planning assistant. Your job is to:
- 1. Analyze the user's research needs
- 2. Plan the complete research steps
- 3. If the user specifies particular steps, plan how to execute them
-
- You must reply strictly in the following JSON format, with no additional text:
- {
-     "analysis": "analysis of the user's needs",
-     "steps": [
-         {
-             "step": "step 1",
-             "description": "detailed description",
-             "expected_outcome": "expected outcome"
-         }
-     ]
- }"""
-
- JSON_SCHEMA = {
-     "type": "object",
-     "properties": {
-         "analysis": {"type": "string", "description": "analysis of the user's needs"},
-         "steps": {
-             "type": "array",
-             "items": {
-                 "type": "object",
-                 "properties": {
-                     "step": {"type": "string"},
-                     "description": {"type": "string"},
-                     "expected_outcome": {"type": "string"},
-                 },
-                 "required": ["step", "description", "expected_outcome"],
-             },
-         },
-     },
-     "required": ["analysis", "steps"],
- }
-
-
- @tool
- def step_planner(user_input: str) -> str:
-     """
-     Research planning tool - plans research steps and an execution plan
-
-     This tool will:
-     1. Analyze the user's research needs
-     2. Plan the complete research steps based on the available tools
-     3. Handle the execution steps the user specifies
-
-     Args:
-         user_input (str): The user's research request or specified steps
-
-     Returns:
-         str: The research plan in JSON format, including the analysis and detailed steps
-     """
-
-     print("step_planner user_input============>", user_input)
-     response = litellm.completion(
-         model="o3-mini",
-         messages=[
-             {"role": "system", "content": SYSTEM_PROMPT},
-             {"role": "user", "content": user_input},
-         ],
-         response_format={"type": "json_object", "schema": JSON_SCHEMA},
-         reasoning_effort="high",
-     )
-
-     try:
-         # Make sure the response is valid JSON
-         plan = json.loads(response.choices[0].message.content)
-         return json.dumps(plan, ensure_ascii=False, indent=2)
-     except json.JSONDecodeError:
-         # If the JSON cannot be parsed, return the raw response as-is
-         return response.choices[0].message.content
+ from langchain_core.tools import tool
+ import litellm
+ import json
+
+ SYSTEM_PROMPT = """You are a professional research planning assistant. Your job is to:
+ 1. Analyze the user's research needs
+ 2. Plan the complete research steps
+ 3. If the user specifies particular steps, plan how to execute them
+
+ You must reply strictly in the following JSON format, with no additional text:
+ {
+     "analysis": "analysis of the user's needs",
+     "steps": [
+         {
+             "step": "step 1",
+             "description": "detailed description",
+             "expected_outcome": "expected outcome"
+         }
+     ]
+ }"""
+
+ JSON_SCHEMA = {
+     "type": "object",
+     "properties": {
+         "analysis": {"type": "string", "description": "analysis of the user's needs"},
+         "steps": {
+             "type": "array",
+             "items": {
+                 "type": "object",
+                 "properties": {
+                     "step": {"type": "string"},
+                     "description": {"type": "string"},
+                     "expected_outcome": {"type": "string"},
+                 },
+                 "required": ["step", "description", "expected_outcome"],
+             },
+         },
+     },
+     "required": ["analysis", "steps"],
+ }
+
+
+ @tool
+ def step_planner(user_input: str) -> str:
+     """
+     Research planning tool - plans research steps and an execution plan
+
+     This tool will:
+     1. Analyze the user's research needs
+     2. Plan the complete research steps based on the available tools
+     3. Handle the execution steps the user specifies
+
+     Args:
+         user_input (str): The user's research request or specified steps
+
+     Returns:
+         str: The research plan in JSON format, including the analysis and detailed steps
+     """
+
+     print("step_planner user_input============>", user_input)
+     response = litellm.completion(
+         model="o3-mini",
+         messages=[
+             {"role": "system", "content": SYSTEM_PROMPT},
+             {"role": "user", "content": user_input},
+         ],
+         response_format={"type": "json_object", "schema": JSON_SCHEMA},
+         reasoning_effort="high",
+     )
+
+     try:
+         # Make sure the response is valid JSON
+         plan = json.loads(response.choices[0].message.content)
+         return json.dumps(plan, ensure_ascii=False, indent=2)
+     except json.JSONDecodeError:
+         # If the JSON cannot be parsed, return the raw response as-is
+         return response.choices[0].message.content
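Because step_planner is declared with LangChain's @tool decorator, it can be bound to an agent or invoked directly. A minimal sketch, assuming litellm is configured with credentials for o3-mini (e.g. an OPENAI_API_KEY in the environment); the topic string is illustrative:

import json

from botrun_flow_lang.langgraph_agents.agents.agent_tools.step_planner import (
    step_planner,
)

# Tools created with @tool are called via .invoke(); a single-argument tool
# also accepts the bare string instead of an args dict.
raw_plan = step_planner.invoke(
    {"user_input": "Survey recent work on retrieval-augmented generation"}
)

# The tool falls back to raw text when the model strays from the JSON format,
# so json.loads can still raise here.
plan = json.loads(raw_plan)
for step in plan["steps"]:
    print(f'{step["step"]}: {step["expected_outcome"]}')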