pycoze 0.1.85__py3-none-any.whl → 0.1.87__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- pycoze/bot/agent/agent.py +1 -0
- pycoze/bot/agent/agent_types/openai_func_call_agent.py +19 -26
- pycoze/bot/agent/assistant.py +2 -1
- pycoze/bot/bot.py +21 -18
- {pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/METADATA +1 -1
- {pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/RECORD +9 -9
- {pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/LICENSE +0 -0
- {pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/WHEEL +0 -0
- {pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/top_level.txt +0 -0
pycoze/bot/agent/agent_types/openai_func_call_agent.py
CHANGED
@@ -4,7 +4,7 @@ import json
 from langchain.tools import BaseTool
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from langchain_core.language_models.base import LanguageModelLike
-from langchain_core.messages import SystemMessage, ToolMessage
+from langchain_core.messages import SystemMessage, ToolMessage, HumanMessage
 from langgraph.graph import END
 from langgraph.graph.message import MessageGraph
 from langgraph.prebuilt import ToolExecutor, ToolInvocation
@@ -31,7 +31,11 @@ def get_all_markdown_json(content):
 
 
 def create_openai_func_call_agent_executor(
-    tools: list[BaseTool],
+    tools: list[BaseTool],
+    llm: LanguageModelLike,
+    system_message: str,
+    tool_compatibility_mode: str,
+    **kwargs
 ):
 
     async def _get_messages(messages):
@@ -58,7 +62,6 @@ def create_openai_func_call_agent_executor(
     def should_continue(messages):
         # If there is no FuncCall, then we finish
         last_message = messages[-1]
-        print("last_message", last_message)
         if last_message.content.strip().endswith("```"):
             last_message.content = last_message.content + "\n\n"  # avoid affecting readability
         # if not last_message.tool_calls:
@@ -128,7 +131,6 @@ def create_openai_func_call_agent_executor(
         # Based on the continue condition
         # we know the last message involves a FuncCall
         last_message = messages[-1]
-        print("last_message", last_message)
         for tool_call in last_message.additional_kwargs["tool_calls"]:
             function = tool_call["function"]
             function_name = function["name"]
@@ -167,44 +169,35 @@ def create_openai_func_call_agent_executor(
                 additional_kwargs={"name": tool_call["function"]["name"]},
             )
             tool_messages.append(message)
+        if tool_compatibility_mode:
+            # HumanMessage
+            tool_msgs_str = repr(tool_messages)
+            tool_messages = [
+                HumanMessage(
+                    content="The tool call is done, the result is as follows:\n"
+                    + tool_msgs_str
+                )
+            ]
         return tool_messages
 
     workflow = MessageGraph()
 
     # Define the two nodes we will cycle between
     workflow.add_node("agent", agent)
-    workflow.add_node("action", call_tool)
+    workflow.add_node("call_tool", call_tool)
 
-    # Set the entrypoint as `agent`
-    # This means that this node is the first one called
     workflow.set_entry_point("agent")
 
-    # We now add a conditional edge
     workflow.add_conditional_edges(
-        # First, we define the start node. We use `agent`.
-        # This means these are the edges taken after the `agent` node is called.
        "agent",
-        # Next, we pass in the function that will determine which node is called next.
        should_continue,
-        # Finally we pass in a mapping.
-        # The keys are strings, and the values are other nodes.
-        # END is a special node marking that the graph should finish.
-        # What will happen is we will call `should_continue`, and then the output of that
-        # will be matched against the keys in this mapping.
-        # Based on which one it matches, that node will then be called.
        {
-
-            "continue": "action",
-            # Otherwise we finish.
+            "continue": "call_tool",
            "end": END,
        },
    )
 
-    #
-
-    workflow.add_edge("action", "agent")
+    # After calling the tool, call the agent again
+    workflow.add_edge("call_tool", "agent")
 
-    # Finally, we compile it!
-    # This compiles it into a LangChain Runnable,
-    # meaning you can use it as you would any other runnable
     return workflow.compile()
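The rewiring above renames the tool node from "action" to "call_tool" but keeps the same agent → tool → agent cycle. As a rough standalone sketch of that loop (not the package's own code): the LLM node and ToolExecutor are replaced here by stub functions, and it assumes a langgraph version that still ships the MessageGraph API this file imports.

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph import END
from langgraph.graph.message import MessageGraph


def agent(messages):
    # Stub for the LLM node: request a tool once, then answer.
    if not any(isinstance(m, AIMessage) for m in messages):
        return AIMessage(
            content="",
            additional_kwargs={"tool_calls": [{"function": {"name": "get_time", "arguments": "{}"}}]},
        )
    return AIMessage(content="done")


def call_tool(messages):
    # Stub for the tool node: pretend the requested tool ran.
    return HumanMessage(content="The tool call is done, the result is as follows:\n12:00")


def should_continue(messages):
    # Route back to the tool node only while the agent is still requesting tools.
    return "continue" if messages[-1].additional_kwargs.get("tool_calls") else "end"


workflow = MessageGraph()
workflow.add_node("agent", agent)
workflow.add_node("call_tool", call_tool)
workflow.set_entry_point("agent")
workflow.add_conditional_edges("agent", should_continue, {"continue": "call_tool", "end": END})
workflow.add_edge("call_tool", "agent")  # after calling the tool, call the agent again
print(workflow.compile().invoke([HumanMessage(content="What time is it?")]))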
pycoze/bot/agent/assistant.py
CHANGED
@@ -18,10 +18,11 @@ class Runnable(RunnableBinding):
         tools: Sequence[BaseTool],
         llm: LanguageModelLike,
         assistant_message: str,
+        tool_compatibility_mode: bool
     ) -> None:
 
         agent_executor = create_openai_func_call_agent_executor(
-            tools, llm, assistant_message
+            tools, llm, assistant_message, tool_compatibility_mode
         )
         agent_executor = agent_executor.with_config({"recursion_limit": 50})
         super().__init__(
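The tool_compatibility_mode flag that Runnable now forwards drives the fallback added in openai_func_call_agent.py above: with the flag set, tool results are collapsed into a single human message instead of being returned as ToolMessage objects. A minimal sketch of that conversion in isolation, assuming only langchain_core (the helper name is hypothetical):

from langchain_core.messages import HumanMessage, ToolMessage


def as_compatible_messages(tool_messages, tool_compatibility_mode):
    # Hypothetical helper mirroring the branch added in call_tool: models without
    # native tool-message support receive one HumanMessage instead of ToolMessages.
    if not tool_compatibility_mode:
        return tool_messages
    return [
        HumanMessage(
            content="The tool call is done, the result is as follows:\n"
            + repr(tool_messages)
        )
    ]


results = [ToolMessage(content="22°C, sunny", tool_call_id="call_0", additional_kwargs={"name": "get_weather"})]
print(as_compatible_messages(results, tool_compatibility_mode=True)[0].content)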
pycoze/bot/bot.py
CHANGED
@@ -9,6 +9,8 @@ from langchain_core.utils.function_calling import convert_to_openai_tool
 
 params = utils.arg.read_params_file()
 llm_file = params["appPath"] + "/JsonStorage/llm.json"
+with open(llm_file, "r", encoding="utf-8") as f:
+    cfg = json.load(f)
 
 
 def load_role_setting(bot_setting_file: str):
@@ -29,30 +31,30 @@ def load_tools(bot_setting_file: str):
 def agent_chat(bot_setting_file, history):
     role_setting = load_role_setting(bot_setting_file)
     tools = load_tools(bot_setting_file)
-
-
-
-
-
-
-    temperature
-
-
-
-
-
-
-
-
-    )
+
+    chat = ChatOpenAI(
+        api_key=cfg["apiKey"],
+        base_url=cfg["baseURL"],
+        model=cfg["model"],
+        temperature=(
+            role_setting["temperature"] * 2
+            if cfg["model"].startswith("deepseek")
+            else role_setting["temperature"]
+        ),
+        stop_sequences=[
+            "tool▁calls▁end",
+            "tool▁call▁end",
+        ],  # disable deepseek's tool-call markers, otherwise it fabricates tool-call processes and results
+    )
     prompt = role_setting["prompt"]
     if (
         cfg["model"].startswith("deepseek")
-        or cfg["
+        or cfg["toolCompatibilityMode"]
         and len(tools) > 0
     ):
         prompt += """
-
+If you are not sure of the result, be sure to look it up with a tool.
+If you need to call a tool, end your reply with a JSON code block in correct markdown (make sure the JSON is valid, e.g. no unescaped backslashes):
 ```json
 {"name": function name, "parameters": parameter dict}
 ```
@@ -66,6 +68,7 @@ def agent_chat(bot_setting_file, history):
         tools=tools,
         llm=chat,
         assistant_message=prompt,
+        tool_compatibility_mode=cfg["toolCompatibilityMode"],
     )
     return asyncio.run(run_agent(agent, history))
 
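The prompt appended above asks compatibility-mode models to end their reply with a ```json block of the form {"name": ..., "parameters": ...}. The diff does not show how get_all_markdown_json parses such replies, but a hypothetical equivalent of that extraction step could look like this:

import json
import re


def extract_json_tool_calls(content):
    # Hypothetical stand-in for get_all_markdown_json (its body is not part of this
    # diff): pull every ```json fenced block out of the reply and parse it.
    calls = []
    for block in re.findall(r"```json\s*(.*?)```", content, flags=re.DOTALL):
        try:
            calls.append(json.loads(block))
        except json.JSONDecodeError:
            continue  # skip malformed blocks, which the prompt warns the model about
    return calls


reply = 'Let me check.\n```json\n{"name": "get_weather", "parameters": {"city": "Beijing"}}\n```'
print(extract_json_tool_calls(reply))
# [{'name': 'get_weather', 'parameters': {'city': 'Beijing'}}]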
{pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/RECORD
CHANGED
@@ -6,13 +6,13 @@ pycoze/ai/__init__.py,sha256=Smivpb8qbRnzWkzKRe2IxsmKP5Dh8EvngDFdkD_DVLo,73
 pycoze/ai/comfyui.py,sha256=u75tZywkuXiOdm7XST2kBAaveJKpPvY_qTQr_TN9sXk,795
 pycoze/ai/vram_reserve.py,sha256=s55Cy-Q5mTq-k5oIPbAFwCfrjatjN0QTjQxW7WBTPZI,5738
 pycoze/bot/__init__.py,sha256=6HHMxDQVOyZM9dtSjQm9tjGnhj4h7CixD0JOvEwTi48,41
-pycoze/bot/bot.py,sha256=
+pycoze/bot/bot.py,sha256=w_ddp3IeWryUgIad-_V-SsDzGwYqZXekw5NXbYyXrkk,3041
 pycoze/bot/agent/__init__.py,sha256=YR9vpkEQn1e4937r_xFPJXUCPBEJ0SFzEQDBe2x3-YA,157
-pycoze/bot/agent/agent.py,sha256=
-pycoze/bot/agent/assistant.py,sha256=
+pycoze/bot/agent/agent.py,sha256=XMTO6s8OJpaOnymT8ZUuJxXx2ICZ3r7Ck0pHJqPPFIs,3346
+pycoze/bot/agent/assistant.py,sha256=XI4w-rFfbk3qYE0tWcWoya8dz-3cA-QZ0Sanhl3DbKE,1112
 pycoze/bot/agent/chat.py,sha256=kc0qgcrBSXdiMy49JwThZTV-0PAvzAhiUvbI5ILiSnU,571
 pycoze/bot/agent/agent_types/__init__.py,sha256=XNvKWq9REE5Wzjm0OZi3CKIQF2UZ9PZkeUuxgFJbrfc,128
-pycoze/bot/agent/agent_types/openai_func_call_agent.py,sha256=
+pycoze/bot/agent/agent_types/openai_func_call_agent.py,sha256=bb6zze6lbdiZi9uSim5oWNviGm5sqLOEY6suSP_xyaI,7591
 pycoze/ui/__init__.py,sha256=7xAfL2lfG7-jllPJEZUJO89xUE9sNzvo1y0WmBswjBI,458
 pycoze/ui/base.py,sha256=SCXVDK7PpMaBv6ovvabHcfRq_d2AWM0BRyxpNhuJN5A,1285
 pycoze/ui/color.py,sha256=cT9Ib8uNzkOKxyW0IwVj46o4LwdB1xgNCj1_Rou9d_4,854
@@ -21,8 +21,8 @@ pycoze/ui/ui_def.py,sha256=UhhU_yB3GV9ISbvTWT48hsHPHI250BhMILh6bu5Uioo,4206
 pycoze/utils/__init__.py,sha256=TNJhFfY7JYdLlzuP9GvgxfNXUtbgH_NUUJSqHXCxJn4,78
 pycoze/utils/arg.py,sha256=kA3KBQzXc2WlH5XbF8kfikfpqljiKaW7oY_GE4Qyffc,753
 pycoze/utils/text_or_file.py,sha256=gpxZVWt2DW6YiEg_MnMuwg36VNf3TX383QD_1oZNB0Y,551
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
+pycoze-0.1.87.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
+pycoze-0.1.87.dist-info/METADATA,sha256=ilmNEozGi1c7Id-kP6S9J7GiLDQEOS3mvOJKeJIoGn8,719
+pycoze-0.1.87.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+pycoze-0.1.87.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
+pycoze-0.1.87.dist-info/RECORD,,
{pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/LICENSE
File without changes
{pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/WHEEL
File without changes
{pycoze-0.1.85.dist-info → pycoze-0.1.87.dist-info}/top_level.txt
File without changes