pycoze 0.1.79__tar.gz → 0.1.81__tar.gz
- {pycoze-0.1.79 → pycoze-0.1.81}/PKG-INFO +1 -1
- pycoze-0.1.81/pycoze/bot/agent/agent.py +89 -0
- pycoze-0.1.81/pycoze/bot/agent/agent_types/__init__.py +4 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/bot/agent/assistant.py +34 -35
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/bot/bot.py +12 -5
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze.egg-info/PKG-INFO +1 -1
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze.egg-info/SOURCES.txt +0 -2
- {pycoze-0.1.79 → pycoze-0.1.81}/setup.py +1 -1
- pycoze-0.1.79/pycoze/bot/agent/agent.py +0 -111
- pycoze-0.1.79/pycoze/bot/agent/agent_types/__init__.py +0 -5
- pycoze-0.1.79/pycoze/bot/agent/agent_types/react_agent.py +0 -170
- pycoze-0.1.79/pycoze/bot/agent/agent_types/react_prompt.py +0 -91
- {pycoze-0.1.79 → pycoze-0.1.81}/LICENSE +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/README.md +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/__init__.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/access/__init__.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/access/tool_for_bot.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ai/__init__.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ai/comfyui.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ai/vram_reserve.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/bot/__init__.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/bot/agent/__init__.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/bot/agent/agent_types/openai_func_call_agent.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/bot/agent/chat.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/module.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ui/__init__.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ui/base.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ui/color.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ui/typ.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/ui/ui_def.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/utils/__init__.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/utils/arg.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze/utils/text_or_file.py +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze.egg-info/dependency_links.txt +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/pycoze.egg-info/top_level.txt +0 -0
- {pycoze-0.1.79 → pycoze-0.1.81}/setup.cfg +0 -0
--- /dev/null
+++ pycoze-0.1.81/pycoze/bot/agent/agent.py
@@ -0,0 +1,89 @@
+import asyncio
+import json
+from langchain_openai import ChatOpenAI
+from .chat import info
+from .assistant import Runnable
+from langchain_core.messages import (
+    HumanMessage,
+    AIMessage,
+    AIMessageChunk,
+    SystemMessage,
+)
+from langchain_core.agents import AgentFinish
+
+
+async def run_agent(agent, inputs: list):
+    exist_ids = set()
+    content_list = []
+    async for event in agent.astream_events(inputs, version="v2"):
+        kind = event["event"]
+        if kind == "on_chain_end":
+            if "data" in event:
+                if (
+                    "output" in event["data"]
+                    and event["data"]["output"] == "end"
+                    and "input" in event["data"]
+                    and isinstance(event["data"]["input"], list)
+                ):
+                    input_list = event["data"]["input"]
+                    for msg in input_list:
+                        if isinstance(msg, HumanMessage) or isinstance(
+                            msg, SystemMessage
+                        ):
+                            content_list = []
+                        if isinstance(msg, AIMessage) and not isinstance(
+                            msg, AIMessageChunk
+                        ):
+                            content = msg.content
+                            if content:
+                                content_list.append(content)
+        elif kind == "on_chat_model_stream":
+            content = event["data"]["chunk"].content
+            if content:
+                info("assistant", content)
+        elif kind == "on_chain_start":
+            data = event["data"]
+            if "input" in data:
+                input_list = (
+                    data["input"]
+                    if isinstance(data["input"], list)
+                    else [data["input"]]
+                )
+                if len(input_list) == 0:
+                    continue
+                msg = input_list[-1]
+                if isinstance(msg, AIMessage) and not isinstance(msg, AIMessageChunk):
+                    if "tool_calls" in msg.additional_kwargs:
+                        tool_calls = msg.additional_kwargs["tool_calls"]
+                        for t in tool_calls:
+                            if t["id"] in exist_ids:
+                                continue
+                            exist_ids.add(t["id"])
+                            tool = t["function"]["name"]
+                            info("assistant", f"\n[调用工具:{tool}]\n\n")
+
+    return "\n".join(content_list)
+
+
+if __name__ == "__main__":
+    from langchain_experimental.tools import PythonREPLTool
+
+    llm_file = r"C:\Users\aiqqq\AppData\Roaming\pycoze\JsonStorage\llm.json"
+    with open(llm_file, "r", encoding="utf-8") as f:
+        cfg = json.load(f)
+    chat = ChatOpenAI(
+        api_key=cfg["apiKey"],
+        base_url=cfg["baseURL"],
+        model=cfg["model"],
+        temperature=0,
+    )
+    python_tool = PythonREPLTool()
+    agent = Runnable(
+        agent_execution_mode="FuncCall",
+        tools=[python_tool],
+        llm=chat,
+        assistant_message="请以女友的口吻回答,输出不小于100字,可以随便说点其他的",
+    )
+
+    inputs = [HumanMessage(content="计算根号7+根号88")]
+    print(asyncio.run(run_agent(agent, inputs)))
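
Note: run_agent streams model output through the info callback imported from .chat, whose body is not part of this diff. A minimal stand-in, assuming it simply forwards role-tagged text to the host process, could look like:

# Stand-in for pycoze.bot.agent.chat.info (not shown in this diff); assumed to
# forward role-tagged chunks to the host UI, approximated here with stdout.
def info(role: str, text: str) -> None:
    print(f"[{role}] {text}", end="", flush=True)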
--- pycoze-0.1.79/pycoze/bot/agent/assistant.py
+++ pycoze-0.1.81/pycoze/bot/agent/assistant.py
@@ -1,35 +1,34 @@
 from typing import Sequence
 from langchain.tools import BaseTool
 from langchain_core.language_models.base import LanguageModelLike
 from langchain_core.runnables import RunnableBinding
 from .agent_types import create_openai_func_call_agent_executor


 class Runnable(RunnableBinding):
     agent_execution_mode: str
     tools: Sequence[BaseTool]
     llm: LanguageModelLike
     assistant_message: str

     def __init__(
         self,
         *,
         agent_execution_mode: str,
         tools: Sequence[BaseTool],
         llm: LanguageModelLike,
         assistant_message: str,
     ) -> None:
-
-
-
-
-
-
-
-
-
-
-
-
-
-        )
+
+        agent_executor = create_openai_func_call_agent_executor(
+            tools, llm, assistant_message
+        )
+        agent_executor = agent_executor.with_config({"recursion_limit": 50})
+        super().__init__(
+            tools=tools,
+            llm=llm,
+            agent_execution_mode=agent_execution_mode,
+            assistant_message=assistant_message,
+            bound=agent_executor,
+            return_intermediate_steps=True,
+        )
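
Note: Runnable follows the RunnableBinding pattern: the declared fields (tools, llm, agent_execution_mode, assistant_message) become attributes, while actual execution is delegated to the `bound` agent executor. An illustrative sketch of that delegation (toy code, not part of pycoze):

from langchain_core.runnables import RunnableBinding, RunnableLambda

# Toy example of the RunnableBinding pattern used above: extra fields are
# declared on the subclass and forwarded to super().__init__, and
# invoke/stream calls are delegated to `bound`.
class Shout(RunnableBinding):
    suffix: str

    def __init__(self, *, suffix: str) -> None:
        super().__init__(
            suffix=suffix,
            bound=RunnableLambda(lambda text: text.upper() + suffix),
        )

print(Shout(suffix="!!").invoke("hello"))  # prints: HELLO!!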
--- pycoze-0.1.79/pycoze/bot/bot.py
+++ pycoze-0.1.81/pycoze/bot/bot.py
@@ -5,6 +5,7 @@ import asyncio
 from langchain_core.messages import HumanMessage
 from pycoze import utils
 from pycoze.access.tool_for_bot import import_tools
+from langchain_core.utils.function_calling import convert_to_openai_tool

 params = utils.arg.read_params_file()
 llm_file = params["appPath"] + "/JsonStorage/llm.json"
@@ -36,7 +37,7 @@ def agent_chat(bot_setting_file, history):
         model=cfg["model"],
         temperature=(
             role_setting["temperature"] * 2
-            if cfg["model"].startswith("deepseek")
+            if cfg["model"].startswith("deepseek") or cfg["model"].startswith("yi-")
             else role_setting["temperature"]
         ),
         stop_sequences=[
@@ -45,17 +46,23 @@
         ],  # disable deepseek's tool-call markers, otherwise it fabricates tool-call processes and results
     )
     prompt = role_setting["prompt"]
-    if
+    if (
+        cfg["model"].startswith("deepseek")
+        or cfg["model"].startswith("yi-")
+        and len(tools) > 0
+    ):
         prompt += """
 如果需要调用工具,请使用以正确的json格式进行结尾(务必保证json格式正确,不要出现反斜杠未转义等问题):
 ```json
 {"name": 函数名, "parameters": 参数词典}
 ```
 """
+    if cfg["model"].startswith("yi-"):
+        prompt += "\nAvailable functions:\n"
+        for t in tools:
+            prompt += f"\n```json\n{json.dumps(convert_to_openai_tool(t))}\n```"
     agent = Runnable(
-        agent_execution_mode=(
-            "ReAct" if cfg["model"] in ["command-r", "yi-lightning"] else "FuncCall"
-        ),  # 'FuncCall' or 'ReAct'; use FuncCall if the model supports it
+        agent_execution_mode="FuncCall",
         tools=tools,
         llm=chat,
         assistant_message=prompt,
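
Note: for models without native function calling, the prompt above asks the model to end its reply with a ```json {"name": ..., "parameters": ...}``` block, and for yi- models it also inlines each tool's OpenAI schema via convert_to_openai_tool. A hypothetical parser for such replies (not part of pycoze) might look like:

import json
import re

# Hypothetical helper: pull the trailing ```json ... ``` tool-call block that
# the prompt added above asks deepseek/yi- models to emit.
def parse_tool_call(reply: str):
    blocks = re.findall(r"```json\s*(\{.*?\})\s*```", reply, re.DOTALL)
    if not blocks:
        return None  # plain answer, no tool call requested
    call = json.loads(blocks[-1])  # the prompt asks for the block at the end
    return call["name"], call["parameters"]

reply = 'Let me compute that.\n```json\n{"name": "python_repl", "parameters": {"query": "7**0.5 + 88**0.5"}}\n```'
print(parse_tool_call(reply))  # ('python_repl', {'query': '7**0.5 + 88**0.5'})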
--- pycoze-0.1.79/pycoze.egg-info/SOURCES.txt
+++ pycoze-0.1.81/pycoze.egg-info/SOURCES.txt
@@ -20,8 +20,6 @@ pycoze/bot/agent/assistant.py
 pycoze/bot/agent/chat.py
 pycoze/bot/agent/agent_types/__init__.py
 pycoze/bot/agent/agent_types/openai_func_call_agent.py
-pycoze/bot/agent/agent_types/react_agent.py
-pycoze/bot/agent/agent_types/react_prompt.py
 pycoze/ui/__init__.py
 pycoze/ui/base.py
 pycoze/ui/color.py
--- pycoze-0.1.79/pycoze/bot/agent/agent.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import asyncio
-import json
-from langchain_openai import ChatOpenAI
-from .chat import info
-from .assistant import Runnable
-from langchain_core.messages import (
-    HumanMessage,
-    AIMessage,
-    AIMessageChunk,
-    SystemMessage,
-)
-from langchain_core.agents import AgentFinish
-
-
-async def run_agent(agent, inputs: list):
-    if agent.agent_execution_mode == "FuncCall":
-        exist_ids = set()
-        content_list = []
-        async for event in agent.astream_events(inputs, version="v2"):
-            kind = event["event"]
-            if kind == "on_chain_end":
-                if "data" in event:
-                    if (
-                        "output" in event["data"]
-                        and event["data"]["output"] == "end"
-                        and "input" in event["data"]
-                        and isinstance(event["data"]["input"], list)
-                    ):
-                        input_list = event["data"]["input"]
-                        for msg in input_list:
-                            if isinstance(msg, HumanMessage) or isinstance(
-                                msg, SystemMessage
-                            ):
-                                content_list = []
-                            if isinstance(msg, AIMessage) and not isinstance(
-                                msg, AIMessageChunk
-                            ):
-                                content = msg.content
-                                if content:
-                                    content_list.append(content)
-            elif kind == "on_chat_model_stream":
-                content = event["data"]["chunk"].content
-                if content:
-                    info("assistant", content)
-            elif kind == "on_chain_start":
-                data = event["data"]
-                if "input" in data:
-                    input_list = (
-                        data["input"]
-                        if isinstance(data["input"], list)
-                        else [data["input"]]
-                    )
-                    if len(input_list) == 0:
-                        continue
-                    msg = input_list[-1]
-                    if isinstance(msg, AIMessage) and not isinstance(
-                        msg, AIMessageChunk
-                    ):
-                        if "tool_calls" in msg.additional_kwargs:
-                            tool_calls = msg.additional_kwargs["tool_calls"]
-                            for t in tool_calls:
-                                if t["id"] in exist_ids:
-                                    continue
-                                exist_ids.add(t["id"])
-                                tool = t["function"]["name"]
-                                info("assistant", f"\n[调用工具:{tool}]\n\n")
-
-        return "\n".join(content_list)
-    else:
-        assert agent.agent_execution_mode == "ReAct"
-        inputs_msg = {"input": inputs[-1].content, "chat_history": inputs[:-1]}
-        use_tools = []
-        async for event in agent.astream_events(inputs_msg, version="v2"):
-            kind = event["event"]
-            result = None
-            if kind == "on_chain_end":
-                if "data" in event:
-                    if "output" in event["data"]:
-                        output = event["data"]["output"]
-                        if "agent_outcome" in output and "input" in output:
-                            outcome = output["agent_outcome"]
-                            if isinstance(outcome, AgentFinish):
-                                result = outcome.return_values["output"]
-            elif kind == "on_tool_start":
-                use_tools.append(event["name"])
-                info("assistant", f"\n[调用工具:{use_tools}]\n\n")
-        return result
-
-
-if __name__ == "__main__":
-    from langchain_experimental.tools import PythonREPLTool
-
-    llm_file = r"C:\Users\aiqqq\AppData\Roaming\pycoze\JsonStorage\llm.json"
-    with open(llm_file, "r", encoding="utf-8") as f:
-        cfg = json.load(f)
-    chat = ChatOpenAI(
-        api_key=cfg["apiKey"],
-        base_url=cfg["baseURL"],
-        model=cfg["model"],
-        temperature=0,
-    )
-    python_tool = PythonREPLTool()
-    agent = Runnable(
-        agent_execution_mode="FuncCall",  # 'FuncCall' or 'ReAct'; use FuncCall if the model supports it
-        tools=[python_tool],
-        llm=chat,
-        assistant_message="请以女友的口吻回答,输出不小于100字,可以随便说点其他的",
-    )
-
-    inputs = [HumanMessage(content="计算根号7+根号88")]
-    print(asyncio.run(run_agent(agent, inputs)))
--- pycoze-0.1.79/pycoze/bot/agent/agent_types/react_agent.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# https://github.com/langchain-ai/langgraph/blob/ea071935fef240d631305df12b6d83e9c363cef3/libs/langgraph/langgraph/prebuilt/agent_executor.py
-import operator
-from typing import Annotated, Sequence, TypedDict, Union
-from langchain.tools import BaseTool
-from langchain_core.agents import AgentAction, AgentFinish
-from langchain_core.messages import BaseMessage
-from langchain_core.language_models import LanguageModelLike
-from langgraph.graph import END, StateGraph
-from langgraph.graph.state import CompiledStateGraph
-from langgraph.prebuilt.tool_executor import ToolExecutor
-from langgraph.utils.runnable import RunnableCallable
-from langchain.agents import create_structured_chat_agent
-from .react_prompt import react_agent_prompt
-
-
-def create_react_agent_executor(
-    tools: list[BaseTool],
-    llm: LanguageModelLike,
-    system_message: str,
-    **kwargs  # ignore
-):
-    prompt = react_agent_prompt.partial(assistant_message=system_message)
-    agent = create_structured_chat_agent(llm, tools, prompt)
-    agent_executer = create_agent_executor(agent, tools)
-    return agent_executer
-
-
-def _get_agent_state(input_schema=None):
-    if input_schema is None:
-
-        class AgentState(TypedDict):
-            # The input string
-            input: str
-            # The list of previous messages in the conversation
-            chat_history: Sequence[BaseMessage]
-            # The outcome of a given call to the agent
-            # Needs `None` as a valid type, since this is what this will start as
-            agent_outcome: Union[AgentAction, AgentFinish, None]
-            # List of actions and corresponding observations
-            # Here we annotate this with `operator.add` to indicate that operations to
-            # this state should be ADDED to the existing values (not overwrite it)
-            intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
-
-    else:
-
-        class AgentState(input_schema):
-            # The outcome of a given call to the agent
-            # Needs `None` as a valid type, since this is what this will start as
-            agent_outcome: Union[AgentAction, AgentFinish, None]
-            # List of actions and corresponding observations
-            # Here we annotate this with `operator.add` to indicate that operations to
-            # this state should be ADDED to the existing values (not overwrite it)
-            intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
-
-    return AgentState
-
-
-def create_agent_executor(
-    agent_runnable, tools, input_schema=None
-) -> CompiledStateGraph:
-    """This is a helper function for creating a graph that works with LangChain Agents.
-
-    Args:
-        agent_runnable (RunnableLike): The agent runnable.
-        tools (list): A list of tools to be used by the agent.
-        input_schema (dict, optional): The input schema for the agent. Defaults to None.
-
-    Returns:
-        The `CompiledStateGraph` object.
-    """
-
-    if isinstance(tools, ToolExecutor):
-        tool_executor = tools
-    else:
-        tool_executor = ToolExecutor(tools)
-
-    state = _get_agent_state(input_schema)
-
-    # Define logic that will be used to determine which conditional edge to go down
-
-    def should_continue(data):
-        # If the agent outcome is an AgentFinish, then we return `exit` string
-        # This will be used when setting up the graph to define the flow
-        if isinstance(data["agent_outcome"], AgentFinish):
-            return "end"
-        # Otherwise, an AgentAction is returned
-        # Here we return `continue` string
-        # This will be used when setting up the graph to define the flow
-        else:
-            return "continue"
-
-    def run_agent(data, config):
-        agent_outcome = agent_runnable.invoke(data, config)
-        return {"agent_outcome": agent_outcome}
-
-    async def arun_agent(data, config):
-        agent_outcome = await agent_runnable.ainvoke(data, config)
-        return {"agent_outcome": agent_outcome}
-
-    # Define the function to execute tools
-    def execute_tools(data, config):
-        # Get the most recent agent_outcome - this is the key added in the `agent` above
-        agent_action = data["agent_outcome"]
-        if not isinstance(agent_action, list):
-            agent_action = [agent_action]
-        output = tool_executor.batch(agent_action, config, return_exceptions=True)
-        return {
-            "intermediate_steps": [
-                (action, str(out)) for action, out in zip(agent_action, output)
-            ]
-        }
-
-    async def aexecute_tools(data, config):
-        # Get the most recent agent_outcome - this is the key added in the `agent` above
-        agent_action = data["agent_outcome"]
-        if not isinstance(agent_action, list):
-            agent_action = [agent_action]
-        output = await tool_executor.abatch(
-            agent_action, config, return_exceptions=True
-        )
-        return {
-            "intermediate_steps": [
-                (action, str(out)) for action, out in zip(agent_action, output)
-            ]
-        }
-
-    # Define a new graph
-    workflow = StateGraph(state)
-
-    # Define the two nodes we will cycle between
-    workflow.add_node("agent", RunnableCallable(run_agent, arun_agent))
-    workflow.add_node("tools", RunnableCallable(execute_tools, aexecute_tools))
-
-    # Set the entrypoint as `agent`
-    # This means that this node is the first one called
-    workflow.set_entry_point("agent")
-
-    # We now add a conditional edge
-    workflow.add_conditional_edges(
-        # First, we define the start node. We use `agent`.
-        # This means these are the edges taken after the `agent` node is called.
-        "agent",
-        # Next, we pass in the function that will determine which node is called next.
-        should_continue,
-        # Finally we pass in a mapping.
-        # The keys are strings, and the values are other nodes.
-        # END is a special node marking that the graph should finish.
-        # What will happen is we will call `should_continue`, and then the output of that
-        # will be matched against the keys in this mapping.
-        # Based on which one it matches, that node will then be called.
-        {
-            # If `tools`, then we call the tool node.
-            "continue": "tools",
-            # Otherwise we finish.
-            "end": END,
-        },
-    )
-
-    # We now add a normal edge from `tools` to `agent`.
-    # This means that after `tools` is called, `agent` node is called next.
-    workflow.add_edge("tools", "agent")
-
-    # Finally, we compile it!
-    # This compiles it into a LangChain Runnable,
-    # meaning you can use it as you would any other runnable
-    return workflow.compile()
-
-
-if __name__ == "__main__":
-    pass
--- pycoze-0.1.79/pycoze/bot/agent/agent_types/react_prompt.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from typing import List, Union
-from langchain_core.prompts.chat import (
-    ChatPromptTemplate,
-    HumanMessagePromptTemplate,
-    SystemMessagePromptTemplate,
-    MessagesPlaceholder,
-)
-from langchain_core.messages import (
-    FunctionMessage,
-    SystemMessage,
-    ToolMessage,
-    AIMessage,
-    HumanMessage,
-    ChatMessage,
-)
-
-
-system_temp = """
-{assistant_message}
-
-Respond to the human as helpfully and accurately as possible. You have access to the following tools:
-
-{tools}
-
-Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
-
-Valid "action" values: "Final Answer" or {tool_names}
-
-Provide only ONE action per $JSON_BLOB, as shown:
-
-```
-{{{{
-  "action": $TOOL_NAME,
-  "action_input": $INPUT
-}}}}
-```
-
-Follow this format:
-
-Question: input question to answer
-Thought: consider previous and subsequent steps
-Action:
-```
-$JSON_BLOB
-```
-Observation: action result
-... (repeat Thought/Action/Observation N times)
-Thought: I know what to respond
-Action:
-```
-{{{{
-  "action": "Final Answer",
-  "action_input": "Final response to human"
-}}}}
-
-Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation
-"""
-
-human_temp = """Question: {input}
-
-Thought: {agent_scratchpad}
-(reminder to respond in a JSON blob no matter what)"""
-
-
-react_agent_prompt = ChatPromptTemplate(
-    input_variables=[
-        "agent_scratchpad",
-        "input",
-        "tool_names",
-        "tools",
-        "assistant_message",
-    ],
-    optional_variables=["chat_history"],
-    input_types={
-        "chat_history": List[
-            Union[
-                AIMessage,
-                HumanMessage,
-                ChatMessage,
-                SystemMessage,
-                FunctionMessage,
-                ToolMessage,
-            ]
-        ]
-    },
-    messages=[
-        SystemMessagePromptTemplate.from_template(system_temp),
-        MessagesPlaceholder(variable_name="chat_history", optional=True),
-        HumanMessagePromptTemplate.from_template(human_temp),
-    ],
-)