pycoze 0.1.20__tar.gz → 0.1.23__tar.gz
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- {pycoze-0.1.20 → pycoze-0.1.23}/PKG-INFO +1 -1
- {pycoze-0.1.20 → pycoze-0.1.23}/pycoze.egg-info/PKG-INFO +1 -1
- pycoze-0.1.23/pycoze.egg-info/SOURCES.txt +7 -0
- pycoze-0.1.23/pycoze.egg-info/top_level.txt +1 -0
- {pycoze-0.1.20 → pycoze-0.1.23}/setup.py +1 -1
- pycoze-0.1.20/pycoze/__init__.py +0 -4
- pycoze-0.1.20/pycoze/bot/__init__.py +0 -1
- pycoze-0.1.20/pycoze/bot/agent/__init__.py +0 -5
- pycoze-0.1.20/pycoze/bot/agent/agent.py +0 -72
- pycoze-0.1.20/pycoze/bot/agent/agent_types/__init__.py +0 -10
- pycoze-0.1.20/pycoze/bot/agent/agent_types/openai_func_call_agent.py +0 -113
- pycoze-0.1.20/pycoze/bot/agent/agent_types/react_agent.py +0 -170
- pycoze-0.1.20/pycoze/bot/agent/agent_types/react_prompt.py +0 -67
- pycoze-0.1.20/pycoze/bot/agent/assistant.py +0 -35
- pycoze-0.1.20/pycoze/bot/agent/chat.py +0 -28
- pycoze-0.1.20/pycoze/bot/base.py +0 -81
- pycoze-0.1.20/pycoze/bot/bot.py +0 -54
- pycoze-0.1.20/pycoze/gpu/__init__.py +0 -1
- pycoze-0.1.20/pycoze/gpu/gpu_reserve.py +0 -167
- pycoze-0.1.20/pycoze/module.py +0 -0
- pycoze-0.1.20/pycoze/ui/__init__.py +0 -1
- pycoze-0.1.20/pycoze/ui/base.py +0 -50
- pycoze-0.1.20/pycoze/ui/color.py +0 -34
- pycoze-0.1.20/pycoze/ui/typ.py +0 -56
- pycoze-0.1.20/pycoze/ui/ui_def.py +0 -167
- pycoze-0.1.20/pycoze/utils/__init__.py +0 -1
- pycoze-0.1.20/pycoze/utils/arg.py +0 -25
- pycoze-0.1.20/pycoze.egg-info/SOURCES.txt +0 -29
- pycoze-0.1.20/pycoze.egg-info/top_level.txt +0 -1
- {pycoze-0.1.20 → pycoze-0.1.23}/LICENSE +0 -0
- {pycoze-0.1.20 → pycoze-0.1.23}/README.md +0 -0
- {pycoze-0.1.20 → pycoze-0.1.23}/pycoze.egg-info/dependency_links.txt +0 -0
- {pycoze-0.1.20 → pycoze-0.1.23}/setup.cfg +0 -0
@@ -0,0 +1 @@
+
pycoze-0.1.20/pycoze/__init__.py
DELETED
@@ -1 +0,0 @@
-from .bot import chat
pycoze-0.1.20/pycoze/bot/agent/agent.py
DELETED
@@ -1,72 +0,0 @@
-import asyncio
-import json
-from langchain_openai import ChatOpenAI
-from .chat import info
-from .assistant import Runnable
-from langchain_core.messages import HumanMessage, AIMessage, AIMessageChunk
-from langchain_core.agents import AgentFinish
-
-
-
-async def run_agent(agent, inputs: list):
-    if agent.agent_execution_mode == 'FuncCall':
-        exist_ids = set()
-        content_list = []
-        async for event in agent.astream_events(inputs, version="v2"):
-            kind = event["event"]
-            if kind == "on_chat_model_stream":
-                content = event["data"]["chunk"].content
-                if content:
-                    content_list.append(content)
-                    info("assistant", content)
-            elif kind == "on_chain_start":
-                data = event["data"]
-                if "input" in data:
-                    input_list = data["input"] if isinstance(data["input"], list) else [data["input"]]
-                    msg = input_list[-1]
-                    if isinstance(msg, AIMessage) and not isinstance(msg, AIMessageChunk):
-                        if "tool_calls" in msg.additional_kwargs:
-                            tool_calls = msg.additional_kwargs["tool_calls"]
-                            for t in tool_calls:
-                                if t["id"] in exist_ids:
-                                    continue
-                                exist_ids.add(t["id"])
-                                tool = t["function"]["name"]
-                                info("assistant", f"[调用工具:{tool}]")
-
-        return "".join(content_list)
-    else:
-        assert agent.agent_execution_mode == 'ReAct'
-        inputs_msg = {'input': inputs[-1].content, 'chat_history': inputs[:-1]}
-        use_tools = []
-        async for event in agent.astream_events(inputs_msg, version="v2"):
-            kind = event["event"]
-            result = None
-            if kind == "on_chain_end":
-                if 'data' in event:
-                    if 'output' in event['data']:
-                        output = event['data']['output']
-                        if 'agent_outcome' in output and "input" in output:
-                            outcome = output['agent_outcome']
-                            if isinstance(outcome, AgentFinish):
-                                result = outcome.return_values['output']
-            elif kind == "on_tool_start":
-                use_tools.append(event['name'])
-                info("assistant", f"[调用工具:{use_tools}]")
-        return result
-
-
-if __name__ == "__main__":
-    from langchain_experimental.tools import PythonREPLTool
-    llm_file = r"C:\Users\aiqqq\AppData\Roaming\pycoze\JsonStorage\llm.json"
-    with open(llm_file, "r", encoding="utf-8") as f:
-        cfg = json.load(f)
-    chat = ChatOpenAI(api_key=cfg["apiKey"], base_url=cfg['baseURL'], model=cfg["model"], temperature=0)
-    python_tool = PythonREPLTool()
-    agent = Runnable(agent_execution_mode='FuncCall',  # 'FuncCall' or 'ReAct'; use FuncCall when the model supports function calling
-                     tools=[python_tool],
-                     llm=chat,
-                     assistant_message="请以女友的口吻回答,输出不小于100字,可以随便说点其他的",)
-
-    inputs = [HumanMessage(content="计算根号7+根号88")]
-    print(asyncio.run(run_agent(agent, inputs)))
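The deleted run_agent above is built on LangChain's astream_events API: it iterates the event stream and dispatches on event kinds such as on_chat_model_stream. A minimal sketch of that consumption pattern, assuming langchain-core is installed (the RunnableLambda here is only a stand-in for the real agent):

import asyncio
from langchain_core.runnables import RunnableLambda

async def main():
    # Stand-in runnable; a real agent would stream model tokens instead.
    echo = RunnableLambda(lambda text: f"echo: {text}")
    async for event in echo.astream_events("hello", version="v2"):
        # Each event carries a kind ("on_chain_start", "on_chain_stream", ...)
        # and a data payload, exactly what run_agent above dispatches on.
        if event["event"] == "on_chain_stream":
            print(event["data"]["chunk"])

asyncio.run(main())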
pycoze-0.1.20/pycoze/bot/agent/agent_types/openai_func_call_agent.py
DELETED
@@ -1,113 +0,0 @@
-# reference:https://github.com/maxtheman/opengpts/blob/d3425b1ba80aec48953a327ecd9a61b80efb0e69/backend/app/agent_types/openai_agent.py
-import json
-
-from langchain.tools import BaseTool
-from langchain_core.utils.function_calling import convert_to_openai_tool
-from langchain_core.language_models.base import LanguageModelLike
-from langchain_core.messages import SystemMessage, ToolMessage
-from langgraph.graph import END
-from langgraph.graph.message import MessageGraph
-from langgraph.prebuilt import ToolExecutor, ToolInvocation
-from typing import Any
-
-
-def create_openai_func_call_agent_executor(tools: list[BaseTool], llm: LanguageModelLike,
-                                           system_message: str, **kwargs):
-
-    async def _get_messages(messages):
-        msgs = []
-        for m in messages:
-            if isinstance(m, ToolMessage):
-                _dict = m.dict()
-                _dict['content'] = str(_dict['content'])
-                m_c = ToolMessage(**_dict)
-                msgs.append(m_c)
-            else:
-                msgs.append(m)
-
-        return [SystemMessage(content=system_message)] + msgs
-
-    if tools:
-        llm_with_tools = llm.bind(tools=[convert_to_openai_tool(t) for t in tools])
-    else:
-        llm_with_tools = llm
-    agent = _get_messages | llm_with_tools
-    tool_executor = ToolExecutor(tools)
-
-    # Define the function that determines whether to continue or not
-    def should_continue(messages):
-        # If there is no FuncCall, then we finish
-        last_message = messages[-1]
-        if not last_message.tool_calls:
-            return "end"
-        # Otherwise if there is, we continue
-        else:
-            return "continue"
-
-    # Define the function to execute tools
-    async def call_tool(messages):
-        actions: list[ToolInvocation] = []
-        # Based on the continue condition
-        # we know the last message involves a FuncCall
-        last_message = messages[-1]
-        for tool_call in last_message.additional_kwargs['tool_calls']:
-            function = tool_call['function']
-            function_name = function['name']
-            _tool_input = json.loads(function['arguments'] or '{}')
-            # We construct an ToolInvocation from the function_call
-            actions.append(ToolInvocation(
-                tool=function_name,
-                tool_input=_tool_input,
-            ))
-        # We call the tool_executor and get back a response
-        responses = await tool_executor.abatch(actions, **kwargs)
-        # We use the response to create a ToolMessage
-        tool_messages = [
-            ToolMessage(
-                tool_call_id=tool_call['id'],
-                content=response,
-                additional_kwargs={'name': tool_call['function']['name']},
-            )
-            for tool_call, response in zip(last_message.additional_kwargs['tool_calls'], responses)
-        ]
-        return tool_messages
-
-    workflow = MessageGraph()
-
-    # Define the two nodes we will cycle between
-    workflow.add_node('agent', agent)
-    workflow.add_node('action', call_tool)
-
-    # Set the entrypoint as `agent`
-    # This means that this node is the first one called
-    workflow.set_entry_point('agent')
-
-    # We now add a conditional edge
-    workflow.add_conditional_edges(
-        # First, we define the start node. We use `agent`.
-        # This means these are the edges taken after the `agent` node is called.
-        'agent',
-        # Next, we pass in the function that will determine which node is called next.
-        should_continue,
-        # Finally we pass in a mapping.
-        # The keys are strings, and the values are other nodes.
-        # END is a special node marking that the graph should finish.
-        # What will happen is we will call `should_continue`, and then the output of that
-        # will be matched against the keys in this mapping.
-        # Based on which one it matches, that node will then be called.
-        {
-            # If `tools`, then we call the tool node.
-            'continue': 'action',
-            # Otherwise we finish.
-            'end': END,
-        },
-    )
-
-    # We now add a normal edge from `tools` to `agent`.
-    # This means that after `tools` is called, `agent` node is called next.
-    workflow.add_edge('action', 'agent')
-
-    # Finally, we compile it!
-    # This compiles it into a LangChain Runnable,
-    # meaning you can use it as you would any other runnable
-    return workflow.compile()
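The executor above binds tools to the model with convert_to_openai_tool. A small sketch of what that conversion yields for an ordinary tool, assuming langchain-core is installed (the add tool is illustrative only):

from langchain_core.tools import tool
from langchain_core.utils.function_calling import convert_to_openai_tool

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

# Produces an OpenAI function-calling schema: a dict with "type": "function"
# and a "function" entry holding the name, description, and JSON-schema parameters.
print(convert_to_openai_tool(add))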
pycoze-0.1.20/pycoze/bot/agent/agent_types/react_agent.py
DELETED
@@ -1,170 +0,0 @@
-# https://github.com/langchain-ai/langgraph/blob/ea071935fef240d631305df12b6d83e9c363cef3/libs/langgraph/langgraph/prebuilt/agent_executor.py
-import operator
-from typing import Annotated, Sequence, TypedDict, Union
-from langchain.tools import BaseTool
-from langchain_core.agents import AgentAction, AgentFinish
-from langchain_core.messages import BaseMessage
-from langchain_core.language_models import LanguageModelLike
-from langgraph.graph import END, StateGraph
-from langgraph.graph.state import CompiledStateGraph
-from langgraph.prebuilt.tool_executor import ToolExecutor
-from langgraph.utils.runnable import RunnableCallable
-from langchain.agents import create_structured_chat_agent
-from .react_prompt import react_agent_prompt
-
-
-def create_react_agent_executor(
-    tools: list[BaseTool],
-    llm: LanguageModelLike,
-    system_message: str,
-    **kwargs  # ignore
-):
-    prompt = react_agent_prompt.partial(assistant_message=system_message)
-    agent = create_structured_chat_agent(llm, tools, prompt)
-    agent_executer = create_agent_executor(agent, tools)
-    return agent_executer
-
-
-def _get_agent_state(input_schema=None):
-    if input_schema is None:
-
-        class AgentState(TypedDict):
-            # The input string
-            input: str
-            # The list of previous messages in the conversation
-            chat_history: Sequence[BaseMessage]
-            # The outcome of a given call to the agent
-            # Needs `None` as a valid type, since this is what this will start as
-            agent_outcome: Union[AgentAction, AgentFinish, None]
-            # List of actions and corresponding observations
-            # Here we annotate this with `operator.add` to indicate that operations to
-            # this state should be ADDED to the existing values (not overwrite it)
-            intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
-
-    else:
-
-        class AgentState(input_schema):
-            # The outcome of a given call to the agent
-            # Needs `None` as a valid type, since this is what this will start as
-            agent_outcome: Union[AgentAction, AgentFinish, None]
-            # List of actions and corresponding observations
-            # Here we annotate this with `operator.add` to indicate that operations to
-            # this state should be ADDED to the existing values (not overwrite it)
-            intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
-
-    return AgentState
-
-
-def create_agent_executor(
-    agent_runnable, tools, input_schema=None
-) -> CompiledStateGraph:
-    """This is a helper function for creating a graph that works with LangChain Agents.
-
-    Args:
-        agent_runnable (RunnableLike): The agent runnable.
-        tools (list): A list of tools to be used by the agent.
-        input_schema (dict, optional): The input schema for the agent. Defaults to None.
-
-    Returns:
-        The `CompiledStateGraph` object.
-    """
-
-    if isinstance(tools, ToolExecutor):
-        tool_executor = tools
-    else:
-        tool_executor = ToolExecutor(tools)
-
-    state = _get_agent_state(input_schema)
-
-    # Define logic that will be used to determine which conditional edge to go down
-
-    def should_continue(data):
-        # If the agent outcome is an AgentFinish, then we return `exit` string
-        # This will be used when setting up the graph to define the flow
-        if isinstance(data["agent_outcome"], AgentFinish):
-            return "end"
-        # Otherwise, an AgentAction is returned
-        # Here we return `continue` string
-        # This will be used when setting up the graph to define the flow
-        else:
-            return "continue"
-
-    def run_agent(data, config):
-        agent_outcome = agent_runnable.invoke(data, config)
-        return {"agent_outcome": agent_outcome}
-
-    async def arun_agent(data, config):
-        agent_outcome = await agent_runnable.ainvoke(data, config)
-        return {"agent_outcome": agent_outcome}
-
-    # Define the function to execute tools
-    def execute_tools(data, config):
-        # Get the most recent agent_outcome - this is the key added in the `agent` above
-        agent_action = data["agent_outcome"]
-        if not isinstance(agent_action, list):
-            agent_action = [agent_action]
-        output = tool_executor.batch(agent_action, config, return_exceptions=True)
-        return {
-            "intermediate_steps": [
-                (action, str(out)) for action, out in zip(agent_action, output)
-            ]
-        }
-
-    async def aexecute_tools(data, config):
-        # Get the most recent agent_outcome - this is the key added in the `agent` above
-        agent_action = data["agent_outcome"]
-        if not isinstance(agent_action, list):
-            agent_action = [agent_action]
-        output = await tool_executor.abatch(
-            agent_action, config, return_exceptions=True
-        )
-        return {
-            "intermediate_steps": [
-                (action, str(out)) for action, out in zip(agent_action, output)
-            ]
-        }
-
-    # Define a new graph
-    workflow = StateGraph(state)
-
-    # Define the two nodes we will cycle between
-    workflow.add_node("agent", RunnableCallable(run_agent, arun_agent))
-    workflow.add_node("tools", RunnableCallable(execute_tools, aexecute_tools))
-
-    # Set the entrypoint as `agent`
-    # This means that this node is the first one called
-    workflow.set_entry_point("agent")
-
-    # We now add a conditional edge
-    workflow.add_conditional_edges(
-        # First, we define the start node. We use `agent`.
-        # This means these are the edges taken after the `agent` node is called.
-        "agent",
-        # Next, we pass in the function that will determine which node is called next.
-        should_continue,
-        # Finally we pass in a mapping.
-        # The keys are strings, and the values are other nodes.
-        # END is a special node marking that the graph should finish.
-        # What will happen is we will call `should_continue`, and then the output of that
-        # will be matched against the keys in this mapping.
-        # Based on which one it matches, that node will then be called.
-        {
-            # If `tools`, then we call the tool node.
-            "continue": "tools",
-            # Otherwise we finish.
-            "end": END,
-        },
-    )
-
-    # We now add a normal edge from `tools` to `agent`.
-    # This means that after `tools` is called, `agent` node is called next.
-    workflow.add_edge("tools", "agent")
-
-    # Finally, we compile it!
-    # This compiles it into a LangChain Runnable,
-    # meaning you can use it as you would any other runnable
-    return workflow.compile()
-
-
-if __name__ == "__main__":
-    pass
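Both deleted executors share the same graph shape: an agent node and a tool node joined by a conditional edge that loops until the agent finishes. A runnable toy version of that cycle, assuming langgraph is installed (the counter state stands in for real agent outcomes):

from typing import TypedDict

from langgraph.graph import END, StateGraph

class State(TypedDict):
    steps: int

def agent(state: State) -> State:
    # A real agent node would call the LLM; here we just count invocations.
    return {"steps": state["steps"] + 1}

def tools(state: State) -> State:
    # A real tool node would run the chosen tool and record the observation.
    return {"steps": state["steps"]}

def should_continue(state: State) -> str:
    return "end" if state["steps"] >= 2 else "continue"

workflow = StateGraph(State)
workflow.add_node("agent", agent)
workflow.add_node("tools", tools)
workflow.set_entry_point("agent")
workflow.add_conditional_edges("agent", should_continue, {"continue": "tools", "end": END})
workflow.add_edge("tools", "agent")
print(workflow.compile().invoke({"steps": 0}))  # {'steps': 2}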
pycoze-0.1.20/pycoze/bot/agent/agent_types/react_prompt.py
DELETED
@@ -1,67 +0,0 @@
-from typing import List, Union
-from langchain_core.prompts.chat import (
-    ChatPromptTemplate,
-    HumanMessagePromptTemplate,
-    SystemMessagePromptTemplate,
-    MessagesPlaceholder
-)
-from langchain_core.messages import FunctionMessage, SystemMessage, ToolMessage, AIMessage, HumanMessage, ChatMessage
-
-
-system_temp = """
-{assistant_message}
-
-Respond to the human as helpfully and accurately as possible. You have access to the following tools:
-
-{tools}
-
-Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
-
-Valid "action" values: "Final Answer" or {tool_names}
-
-Provide only ONE action per $JSON_BLOB, as shown:
-
-```
-{{{{
-  "action": $TOOL_NAME,
-  "action_input": $INPUT
-}}}}
-```
-
-Follow this format:
-
-Question: input question to answer
-Thought: consider previous and subsequent steps
-Action:
-```
-$JSON_BLOB
-```
-Observation: action result
-... (repeat Thought/Action/Observation N times)
-Thought: I know what to respond
-Action:
-```
-{{{{
-  "action": "Final Answer",
-  "action_input": "Final response to human"
-}}}}
-
-Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation
-"""
-
-human_temp = """Question: {input}
-
-Thought: {agent_scratchpad}
-(reminder to respond in a JSON blob no matter what)"""
-
-
-react_agent_prompt = ChatPromptTemplate(
-    input_variables=['agent_scratchpad', 'input', 'tool_names', 'tools', 'assistant_message'],
-    optional_variables=['chat_history'],
-    input_types={'chat_history': List[Union[AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage, ToolMessage]]},
-    messages=[
-        SystemMessagePromptTemplate.from_template(system_temp),
-        MessagesPlaceholder(variable_name='chat_history', optional=True),
-        HumanMessagePromptTemplate.from_template(human_temp)
-    ]
-)
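react_agent_prompt is specialized in two stages: create_react_agent_executor pins assistant_message with .partial, and the executor fills the remaining variables per call. A compact sketch of that two-stage fill, assuming langchain-core (the template text is abbreviated):

from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([
    ("system", "{assistant_message}\nYou have access to: {tools}"),
    ("human", "Question: {input}\nThought: {agent_scratchpad}"),
])
# Stage 1: pin the assistant persona once.
partial = prompt.partial(assistant_message="You are a careful assistant.")
# Stage 2: the executor supplies the per-call variables.
print(partial.format(tools="python_repl", input="2 + 2?", agent_scratchpad=""))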
pycoze-0.1.20/pycoze/bot/agent/assistant.py
DELETED
@@ -1,35 +0,0 @@
-from typing import Sequence
-from langchain.tools import BaseTool
-from langchain_core.language_models.base import LanguageModelLike
-from langchain_core.runnables import RunnableBinding
-from .agent_types import create_openai_func_call_agent_executor, create_react_agent_executor
-
-
-class Runnable(RunnableBinding):
-    agent_execution_mode: str
-    tools: Sequence[BaseTool]
-    llm: LanguageModelLike
-    assistant_message: str
-
-    def __init__(
-        self,
-        *,
-        agent_execution_mode: str,
-        tools: Sequence[BaseTool],
-        llm: LanguageModelLike,
-        assistant_message: str,
-    ) -> None:
-
-        if agent_execution_mode == "FuncCall":
-            agent_executor_object = create_openai_func_call_agent_executor
-        else:
-            agent_executor_object = create_react_agent_executor
-        agent_executor = agent_executor_object(tools, llm, assistant_message)
-        agent_executor = agent_executor.with_config({"recursion_limit": 50})
-        super().__init__(
-            tools=tools,
-            llm=llm,
-            agent_execution_mode=agent_execution_mode,
-            assistant_message=assistant_message,
-            bound=agent_executor, return_intermediate_steps=True
-        )
pycoze-0.1.20/pycoze/bot/agent/chat.py
DELETED
@@ -1,28 +0,0 @@
-import json
-from langchain_core.messages import HumanMessage, AIMessage
-
-
-INPUT_MESSAGE = "INPUT_MESSAGE=>"
-_OUTPUT_MESSAGE = "OUTPUT_MESSAGE=>"
-_INFOMATION_MESSAGE = "INFOMATION_MESSAGE=>"
-_LOG = "LOG=>"
-
-
-
-def log(content, *args, end='\n', **kwargs):
-    print(_LOG + content, *args, end=end, **kwargs)
-
-
-def output(role, content, history):
-    print(_OUTPUT_MESSAGE + json.dumps({"role": role, "content": content}))
-    if role == "assistant":
-        history.append(AIMessage(content=content))
-
-    elif role == "user":
-        history.append(HumanMessage(content=content))
-    else:
-        raise ValueError("Invalid role")
-    return history
-
-def info(role, content):
-    print(_INFOMATION_MESSAGE + json.dumps({"role": role, "content": content}))
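chat.py implements a line protocol over stdout: every message is a fixed prefix followed by a JSON payload, so the host process can demultiplex logs, info messages, and outputs from one stream. A stdlib-only sketch of reading one such line back (the prefix matches the constant above):

import json

INPUT_MESSAGE = "INPUT_MESSAGE=>"

line = INPUT_MESSAGE + json.dumps({"role": "user", "content": "hi"})
if line.startswith(INPUT_MESSAGE):
    payload = json.loads(line[len(INPUT_MESSAGE):])
    print(payload["content"])  # -> hi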
pycoze-0.1.20/pycoze/bot/base.py
DELETED
@@ -1,81 +0,0 @@
-import sys
-import os
-import argparse
-import importlib
-from langchain.agents import tool as _tool
-import types
-import langchain_core
-
-def wrapped_tool(tool, module_path):
-    old_tool_fun = tool.func
-    def _wrapped_tool(*args, **kwargs):
-        print(f"调用了{tool.name}")
-        old_path = os.getcwd()
-        try:
-            sys.path.insert(0, module_path)  # insert at the front of sys.path
-            os.chdir(module_path)
-            result = old_tool_fun(*args, **kwargs)
-        finally:
-            sys.path.remove(module_path)
-            os.chdir(old_path)
-        print(f"{tool.name}调用完毕,结果为:", result)
-        return result
-    return _wrapped_tool
-
-
-def import_tools(tool_id):
-    tool_path = "../../tool"
-    old_path = os.getcwd()
-    module_path = os.path.join(tool_path, tool_id)
-    module_path = os.path.normpath(os.path.abspath(module_path))
-
-    if not os.path.exists(module_path):
-        print(f"Tool {tool_id} not found")
-        return []
-
-    # snapshot the current sys.modules state
-    original_modules = sys.modules.copy()
-
-    try:
-        sys.path.insert(0, module_path)  # insert at the front of sys.path
-        os.chdir(module_path)
-        module = importlib.import_module("tool")
-        export_tools = getattr(module, "export_tools")
-        temp_list = []
-        for tool in export_tools:
-            assert isinstance(tool, langchain_core.tools.StructuredTool) or isinstance(tool, types.FunctionType), f"Tool is not a StructuredTool or function: {tool}"
-            if isinstance(tool, types.FunctionType) and not isinstance(tool, langchain_core.tools.StructuredTool):
-                temp_list.append(_tool(tool))
-        export_tools = temp_list
-
-    except Exception as e:
-        print(f"Error loading tool {tool_id}: {e}")
-        sys.path.remove(module_path)
-        os.chdir(old_path)
-        return []
-
-    # unload the imported modules and restore the sys.modules state
-    importlib.invalidate_caches()
-    for key in list(sys.modules.keys()):
-        if key not in original_modules:
-            del sys.modules[key]
-
-    sys.path.remove(module_path)
-    os.chdir(old_path)
-
-    for tool in export_tools:
-        tool.func = wrapped_tool(tool, module_path)
-
-    return export_tools
-
-
-def read_arg(param: str, is_path=False):
-    parser = argparse.ArgumentParser()
-    parser.add_argument(param, nargs='?', help=f'Parameter {param}')
-    args = parser.parse_args()
-    value = getattr(args, param.lstrip('-'))
-    # if the value is a path wrapped in quotes, strip the quotes
-    if is_path and value and value.startswith('"') and value.endswith('"'):
-        value = value[1:-1]
-
-    return value
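import_tools loads a tool package from an arbitrary directory and then undoes the import so repeated loads see fresh code. The core trick: snapshot sys.modules, import with the directory temporarily at the front of sys.path, then drop every module the import added. A minimal sketch of that pattern (stdlib only; import_from and its arguments are illustrative names, not part of the package):

import importlib
import sys

def import_from(path: str, module_name: str):
    snapshot = sys.modules.copy()      # remember what was loaded before
    sys.path.insert(0, path)           # make the directory importable first
    try:
        return importlib.import_module(module_name)
    finally:
        sys.path.remove(path)
        for name in list(sys.modules):  # unload everything the import pulled in
            if name not in snapshot:
                del sys.modules[name]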
pycoze-0.1.20/pycoze/bot/bot.py
DELETED
@@ -1,54 +0,0 @@
-import json
-from langchain_openai import ChatOpenAI
-from .base import import_tools
-from .agent import run_agent, Runnable, INPUT_MESSAGE, output
-import asyncio
-from langchain_core.messages import HumanMessage
-from pycoze import utils
-
-
-params = utils.arg.read_params()
-llm_file = params["appPath"] + "/JsonStorage/llm.json"
-
-def load_role_setting(bot_setting_file:str):
-    with open(bot_setting_file, "r", encoding="utf-8") as f:
-        return json.load(f)
-
-def load_tools(bot_setting_file:str):
-    with open(bot_setting_file, "r", encoding="utf-8") as f:
-        role_setting = json.load(f)
-
-    tools = []
-    for tool_id in role_setting["tools"]:
-        tools.extend(import_tools(tool_id))
-    return tools
-
-
-
-
-def chat(bot_setting_file:str):
-    history = []
-
-    while True:
-        message = input()
-        role_setting = load_role_setting(bot_setting_file)
-        tools = load_tools(bot_setting_file)
-        if not message.startswith(INPUT_MESSAGE):
-            raise ValueError("Invalid message")
-        message = json.loads(message[len(INPUT_MESSAGE):])["content"]
-        print("user:", message)
-
-        with open(llm_file, "r", encoding="utf-8") as f:
-            cfg = json.load(f)
-        chat = ChatOpenAI(api_key=cfg["apiKey"], base_url=cfg['baseURL'], model=cfg["model"], temperature=role_setting["temperature"])
-
-
-        agent = Runnable(agent_execution_mode='FuncCall',  # 'FuncCall' or 'ReAct'; use FuncCall when the model supports function calling
-                         tools=tools,
-                         llm=chat,
-                         assistant_message=role_setting["prompt"],)
-
-        history += [HumanMessage(content=message)]
-        result = asyncio.run(run_agent(agent, history))
-        output("assistant", result, history)
-
pycoze-0.1.20/pycoze/gpu/__init__.py
DELETED
@@ -1 +0,0 @@
-from .gpu_reserve import reserve_gpu_retry, release_gpu, reserve_gpu_retry
pycoze-0.1.20/pycoze/gpu/gpu_reserve.py
DELETED
@@ -1,167 +0,0 @@
-import subprocess
-import sqlite3
-import atexit
-import time
-import os
-import psutil
-
-try:
-    from pycoze.utils import utils
-
-    # database connection and initialization
-    params = utils.arg.read_params()
-    if params:
-        DATABASE_PATH = params["appPath"] + "/gpu_usage.db"
-    else:
-        raise Exception("No params")
-except:
-    DATABASE_DIR = os.path.expanduser("~/pycoze")
-    os.makedirs(DATABASE_DIR, exist_ok=True)
-    DATABASE_PATH = os.path.join(DATABASE_DIR, "gpu_usage.db")
-TABLE_NAME = "gpu_usage"
-
-
-def initialize_db():
-    conn = sqlite3.connect(DATABASE_PATH)
-    cursor = conn.cursor()
-    cursor.execute(
-        f"""
-        CREATE TABLE IF NOT EXISTS {TABLE_NAME} (
-            id INTEGER PRIMARY KEY,
-            process_id TEXT NOT NULL,
-            reserved_gb REAL NOT NULL
-        )
-        """
-    )
-    conn.commit()
-    conn.close()
-
-
-# probe free GPU resources
-def get_gpu_resources():
-    try:
-        # query GPU info via the nvidia-smi command
-        result = subprocess.run(
-            ["nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits"],
-            stdout=subprocess.PIPE,
-            text=True,
-        )
-        free_memory = result.stdout.strip().split("\n")
-        total_free_memory = sum(float(mem) for mem in free_memory)
-
-        # list the processes currently using the GPU
-        process_result = subprocess.run(
-            [
-                "nvidia-smi",
-                "--query-compute-apps=pid,process_name,used_memory",
-                "--format=csv,noheader,nounits",
-            ],
-            stdout=subprocess.PIPE,
-            text=True,
-        )
-        process_info = process_result.stdout.strip().split("\n")
-
-        # filter the processes whose name contains "python"
-        python_memory_usage = 0.0
-        for process in process_info:
-            pid, process_name, used_memory = process.split(", ")
-            if "python" in process_name.lower():
-                python_memory_usage += float(used_memory)
-
-        # total free memory after excluding python processes
-        total_free_memory -= python_memory_usage
-        return round(total_free_memory / 1024, 2)
-    except Exception as e:
-        print(f"Error getting GPU resources: {e}")
-        return 0.0
-
-
-# reserve GPU resources
-def reserve_gpu(gb):
-    process_id = str(os.getpid())
-    with sqlite3.connect(DATABASE_PATH) as conn:
-        cursor = conn.cursor()
-        cursor.execute(f"SELECT SUM(reserved_gb) FROM {TABLE_NAME}")
-        total_reserved = cursor.fetchone()[0]
-        if total_reserved is None:
-            total_reserved = 0.0
-        available_gb = get_gpu_resources() - total_reserved
-        if available_gb >= gb:
-            cursor.execute(
-                f"INSERT INTO {TABLE_NAME} (process_id, reserved_gb) VALUES (?, ?)",
-                (process_id, gb),
-            )
-            conn.commit()
-            print(f"预留成功,剩余GPU大小: {available_gb - gb} GB")
-            return True
-        else:
-            print(f"预留失败,剩余GPU大小: {available_gb} GB")
-            return False
-
-
-def reserve_gpu_retry(gb, retry=10000):
-    for i in range(retry):
-        time.sleep(1)
-        if reserve_gpu(gb):
-            if i % 10 == 0 or i < 10:
-                print(f"重试第{i}次")
-            return True
-    return False
-
-
-# release GPU resources
-def release_gpu():
-    process_id = str(os.getpid())
-    with sqlite3.connect(DATABASE_PATH) as conn:
-        cursor = conn.cursor()
-        cursor.execute(f"DELETE FROM {TABLE_NAME} WHERE process_id = ?", (process_id,))
-        conn.commit()
-        # compute the remaining GPU memory after the release
-        cursor.execute(f"SELECT SUM(reserved_gb) FROM {TABLE_NAME}")
-        total_reserved = cursor.fetchone()[0]
-        if total_reserved is None:
-            total_reserved = 0.0
-        available_gb = get_gpu_resources() - total_reserved
-        print(f"释放成功,剩余GPU大小: {available_gb} GB")
-
-
-# cleanup function registered to run at exit
-def cleanup():
-    release_gpu()
-    print("程序退出,GPU资源已释放")
-
-
-def initialize_and_check():
-    initialize_db()
-    with sqlite3.connect(DATABASE_PATH) as conn:
-        cursor = conn.cursor()
-        cursor.execute(f"SELECT process_id, reserved_gb FROM {TABLE_NAME}")
-        rows = cursor.fetchall()
-        for row in rows:
-            process_id, reserved_gb = row
-            try:
-                # check whether the process still exists
-                psutil.Process(int(process_id))
-            except psutil.NoSuchProcess:
-                # the process no longer exists; delete its reservation record
-                cursor.execute(
-                    f"DELETE FROM {TABLE_NAME} WHERE process_id = ?", (process_id,)
-                )
-                print(f"进程 {process_id} 不存在,已删除对应的预留记录")
-        conn.commit()
-
-
-# run the initialization check at module import time
-initialize_and_check()
-
-# register the cleanup function
-atexit.register(cleanup)
-
-if __name__ == "__main__":
-    if reserve_gpu_retry(10):
-        print("GPU资源预留成功")
-        time.sleep(100)
-        release_gpu()
-        print("GPU资源释放成功")
-    else:
-        print("GPU资源不足,无法预留")
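gpu_reserve.py coordinates GPU memory across processes by recording per-process reservations in SQLite and comparing the running total against what nvidia-smi reports as free. A stdlib-only sketch of that bookkeeping (a fixed CAPACITY_GB replaces the nvidia-smi probe, and the in-memory database is illustrative):

import os
import sqlite3

CAPACITY_GB = 8.0  # stand-in for get_gpu_resources()

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE gpu_usage (process_id TEXT, reserved_gb REAL)")

def reserve(gb: float) -> bool:
    total = conn.execute("SELECT COALESCE(SUM(reserved_gb), 0) FROM gpu_usage").fetchone()[0]
    if CAPACITY_GB - total < gb:
        return False  # not enough headroom left
    conn.execute("INSERT INTO gpu_usage VALUES (?, ?)", (str(os.getpid()), gb))
    conn.commit()
    return True

def release() -> None:
    conn.execute("DELETE FROM gpu_usage WHERE process_id = ?", (str(os.getpid()),))
    conn.commit()

print(reserve(5.0), reserve(5.0))  # True False: the second request exceeds capacity
release()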
pycoze-0.1.20/pycoze/module.py
DELETED
File without changes
pycoze-0.1.20/pycoze/ui/__init__.py
DELETED
@@ -1 +0,0 @@
-from .ui_def import get_ui, get_ui_text, label, number, text, textarea, password, color, checkbox, single_select, multi_select, file, folder, folder_tree, image, audio, video, seed
pycoze-0.1.20/pycoze/ui/base.py
DELETED
@@ -1,50 +0,0 @@
-import json
-from pycoze import utils
-import json
-import inspect
-
-
-params_file = utils.read_arg('params_file', True)
-params = None
-try:
-    with open(params_file, 'r', encoding='utf-8') as f:
-        params = json.load(f)
-except Exception as e:
-    print(e)
-
-def get_ui():
-    if params is None:
-        return {}
-    stack = inspect.stack()
-    stack_files = list(reversed([s.filename.replace('\\', '/') for s in stack]))
-    match_item = None
-    for f in stack_files:
-        for v in params["uiRecords"]:
-            if v["uiFile"] == f:
-                match_item = v
-                break
-        if match_item:
-            break
-
-    if not match_item:
-        raise Exception("uiRecords not found for file")
-
-    by = match_item["by"]
-
-    for i in range(10):
-        try:
-            if by == 'file':
-                json_file = match_item["Jsonfile"]
-                with open(json_file, "r", encoding='utf-8') as f:
-                    return json.load(f)
-            else:
-                assert by == 'node'
-                workflow_file = match_item["workflowFile"]
-                node_id = match_item["nodeId"]
-                with open(workflow_file, "r", encoding='utf-8') as f:
-                    cells = json.load(f)["graph"]["cells"]
-                node = [cell for cell in cells if cell["id"] == node_id][0]
-                return json.loads(node["data"]["ui"])
-        except Exception as e:
-            if i == 9:
-                raise e
pycoze-0.1.20/pycoze/ui/color.py
DELETED
@@ -1,34 +0,0 @@
-from enum import Enum
-
-class ColorFormat(Enum):
-    HEX = "hex"
-    RGB = "rgb"
-    HSL = "hsl"
-
-
-def hex_to_rgb(hex_color):
-    # Convert HEX to RGB
-    hex_color = hex_color.lstrip('#')
-    return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
-
-
-def rgb_to_hsl(rgb):
-    r, g, b = rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0
-    max_val = max(r, g, b)
-    min_val = min(r, g, b)
-    lightness = (max_val + min_val) / 2  # a bare "l" is too easy to misread
-
-    if max_val == min_val:
-        h, s = 0, 0
-    else:
-        d = max_val - min_val
-        s = d / (2 - max_val - min_val) if lightness > 0.5 else d / (max_val + min_val)
-        if max_val == r:
-            h = (g - b) / d + (6 if g < b else 0)
-        elif max_val == g:
-            h = (b - r) / d + 2
-        else:
-            h = (r - g) / d + 4
-        h /= 6
-
-    return h, s, lightness
pycoze-0.1.20/pycoze/ui/typ.py
DELETED
@@ -1,56 +0,0 @@
-def to_bool(v):
-    if v in ["True", "true", True]:
-        return True
-    elif v in ["False", "false", False]:
-        return False
-    else:
-        return None
-
-
-def toType(val, typ):
-    val_type = type(val)
-    if typ in [str, float]:
-        return typ(val) if val_type != list else [typ(item) for item in val]
-    elif typ == int:
-        try:
-            return typ(val) if val_type != list else [typ(item) for item in val]
-        except:
-            return float(val) if val_type != list else [float(item) for item in val]
-    elif typ == bool:
-        return to_bool(val) if val_type != list else [to_bool(item) for item in val]
-
-
-def useDefaultType(val, default):
-    val_type = type(val)
-    default_type = type(default)
-
-    # convert according to the type of default
-    if default_type in [str, int, float, bool]:
-        try:
-            val = toType(val, default_type)
-            if val_type == list:
-                try:
-                    val = val[0]
-                except:
-                    return default
-        except:
-            return None
-    elif default_type == list:
-        try:
-            if len(default) > 0:
-                if val_type != list:
-                    val = [useDefaultType(val, default[0])]
-                # determine the element type of the default list, assuming all elements share one type
-                if len(val) > 0:
-                    element_type = type(default[0])
-                    if type(val[0]) is element_type:
-                        val = toType(val, element_type)
-                    else:
-                        val = []
-            else:
-                if val_type != list:
-                    val = [val]
-        except:
-            return None
-    return val
-
pycoze-0.1.20/pycoze/ui/ui_def.py
DELETED
@@ -1,167 +0,0 @@
-from .base import get_ui
-from .typ import useDefaultType
-from typing import Union, List
-from .color import hex_to_rgb, rgb_to_hsl, ColorFormat
-import sys
-
-
-def useDefault(name, default):
-    ui_data = get_ui()
-    if name not in ui_data:
-        return default
-    return useDefaultType(ui_data[name], default)
-
-
-def get_ui_text():
-    ui = get_ui()
-    output = ""
-    for key, value in ui.items():
-        output += f"{key}: {value}\n"
-    return output
-
-
-def label(name, tip="", hide_if="", style="", cls=""):
-    pass
-
-
-def number(
-    name,
-    default,
-    min=-sys.maxsize - 1,
-    max=sys.maxsize,
-    step=1,
-    tip="",
-    hide_if="",
-    style="",
-    cls="",
-) -> Union[int, float]:  # note: int | float is not supported on Python 3.9
-    return useDefault(name, default)
-
-
-def text(name, default, tip="", hide_if="", style="", cls="") -> str:
-    return useDefault(name, default)
-
-
-def textarea(name, default, tip="", hide_if="", style="", cls="") -> str:
-    return useDefault(name, default)
-
-
-def password(name, default, tip="", hide_if="", style="", cls="") -> str:
-    return useDefault(name, default)
-
-
-def color(
-    name,
-    default,
-    color_format: ColorFormat = "hex",
-    tip="",
-    hide_if="",
-    style="",
-    cls="",
-) -> str:
-    rgbhex = useDefault(name, default)
-    if color_format == "hex":
-        return rgbhex
-    elif color_format == "rgb":
-        return hex_to_rgb(rgbhex)
-    elif color_format == "hsl":
-        return rgb_to_hsl(hex_to_rgb(rgbhex))
-    return rgbhex
-
-
-def checkbox(name, default, tip="", hide_if="", style="", cls="") -> bool:
-    return useDefault(name, default)
-
-
-def single_select(
-    name,
-    default: Union[str, int, float, bool],
-    options,
-    tip="",
-    hide_if="",
-    style="",
-    cls="",
-):
-    return useDefault(name, default)
-
-
-def multi_select(
-    name,
-    default: Union[List[str], List[int], List[float], List[bool]],
-    options,
-    tip="",
-    hide_if="",
-    style="",
-    cls="",
-):
-    return useDefault(name, default)
-
-
-def single_file_select(name, default: str, tip="", hide_if="", style="", cls="") -> str:
-    return useDefault(name, default)
-
-
-def multi_file_select(
-    name, default: List[str], tip="", hide_if="", style="", cls=""
-) -> List[str]:
-    return useDefault(name, default)
-
-
-def single_folder_select(
-    name, default: str, tip="", hide_if="", style="", cls=""
-) -> str:
-    return useDefault(name, default)
-
-
-def multi_folder_select(
-    name, default: List[str], tip="", hide_if="", style="", cls=""
-) -> List[str]:
-    return useDefault(name, default)
-
-
-def folder_tree(
-    name, root="", default: List[str] = None, tip="", hide_if="", style="", cls=""
-) -> dict:
-    if default is None:
-        default = []
-    return useDefault(name, default)
-
-
-def single_image_select(
-    name, default: str, tip="", hide_if="", style="", cls=""
-) -> str:
-    return useDefault(name, default)
-
-
-def multi_image_select(
-    name, default: List[str], tip="", hide_if="", style="", cls=""
-) -> List[str]:
-    return useDefault(name, default)
-
-
-def single_audio_select(
-    name, default: str, tip="", hide_if="", style="", cls=""
-) -> str:
-    return useDefault(name, default)
-
-
-def multi_audio_select(
-    name, default: List[str], tip="", hide_if="", style="", cls=""
-) -> List[str]:
-    return useDefault(name, default)
-
-
-def single_video_select(
-    name, default: str, tip="", hide_if="", style="", cls=""
-) -> str:
-    return useDefault(name, default)
-
-
-def multi_video_select(
-    name, default: List[str], tip="", hide_if="", style="", cls=""
-) -> List[str]:
-    return useDefault(name, default)
-
-
-def seed(name, default=0, tip="", hide_if="", style="", cls="") -> int:
-    return useDefault(name, default)
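Every control in ui_def.py reduces to the same lookup: return the value the UI recorded under the control's name, or the declared default. A stdlib sketch of that contract (ui_data stands in for what get_ui() loads; type coercion via useDefaultType is omitted):

ui_data = {"width": 640}  # stand-in for the JSON that get_ui() returns

def use_default(name, default):
    return ui_data.get(name, default)

print(use_default("width", 800))   # 640: the UI supplied a value
print(use_default("height", 480))  # 480: fall back to the declared default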
pycoze-0.1.20/pycoze/utils/__init__.py
DELETED
@@ -1 +0,0 @@
-from .arg import read_arg, read_params
pycoze-0.1.20/pycoze/utils/arg.py
DELETED
@@ -1,25 +0,0 @@
-import argparse
-import sys
-import json
-
-def read_arg(param, is_path=False):
-    args = sys.argv[1:]
-    parser = argparse.ArgumentParser()
-    parser.add_argument(param, nargs='?', help=f'Parameter {param}')
-    args = parser.parse_known_args(args)[0]
-    value = getattr(args, param.lstrip('-'))
-    # if the value is a path wrapped in quotes, strip the quotes
-    if is_path and value and value.startswith('"') and value.endswith('"'):
-        value = value[1:-1]
-
-    return value
-
-def read_params():
-    params_file = read_arg('params_file', True)
-    params = None
-    try:
-        with open(params_file, 'r', encoding='utf-8') as f:
-            params = json.load(f)
-    except Exception as e:
-        print(e)
-    return params
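read_arg uses parse_known_args so unrelated command-line flags do not abort parsing, which lets several modules each read only the flag they care about. A minimal sketch (stdlib only; --params_file matches the flag read above):

import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument("--params_file", nargs="?", help="Parameter --params_file")
# parse_known_args ignores flags this parser does not declare.
args, _unknown = parser.parse_known_args(sys.argv[1:])
print(args.params_file)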
pycoze-0.1.20/pycoze.egg-info/SOURCES.txt
DELETED
@@ -1,29 +0,0 @@
-LICENSE
-README.md
-setup.py
-pycoze/__init__.py
-pycoze/module.py
-pycoze.egg-info/PKG-INFO
-pycoze.egg-info/SOURCES.txt
-pycoze.egg-info/dependency_links.txt
-pycoze.egg-info/top_level.txt
-pycoze/bot/__init__.py
-pycoze/bot/base.py
-pycoze/bot/bot.py
-pycoze/bot/agent/__init__.py
-pycoze/bot/agent/agent.py
-pycoze/bot/agent/assistant.py
-pycoze/bot/agent/chat.py
-pycoze/bot/agent/agent_types/__init__.py
-pycoze/bot/agent/agent_types/openai_func_call_agent.py
-pycoze/bot/agent/agent_types/react_agent.py
-pycoze/bot/agent/agent_types/react_prompt.py
-pycoze/gpu/__init__.py
-pycoze/gpu/gpu_reserve.py
-pycoze/ui/__init__.py
-pycoze/ui/base.py
-pycoze/ui/color.py
-pycoze/ui/typ.py
-pycoze/ui/ui_def.py
-pycoze/utils/__init__.py
-pycoze/utils/arg.py
pycoze-0.1.20/pycoze.egg-info/top_level.txt
DELETED
@@ -1 +0,0 @@
-pycoze
{pycoze-0.1.20 → pycoze-0.1.23}/LICENSE
File without changes
{pycoze-0.1.20 → pycoze-0.1.23}/README.md
File without changes
{pycoze-0.1.20 → pycoze-0.1.23}/pycoze.egg-info/dependency_links.txt
File without changes
{pycoze-0.1.20 → pycoze-0.1.23}/setup.cfg
File without changes