pycoze 0.1.29-py3-none-any.whl → 0.1.31-py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- pycoze/bot/agent/agent_types/__init__.py +2 -7
- pycoze/bot/agent/agent_types/openai_func_call_agent.py +27 -22
- pycoze/bot/agent/agent_types/react_agent.py +1 -1
- pycoze/bot/agent/agent_types/react_prompt.py +33 -9
- pycoze/bot/base.py +11 -4
- pycoze/bot/bot.py +31 -20
- pycoze/gpu/gpu_reserve.py +14 -8
- pycoze/utils/__init__.py +1 -0
- pycoze/utils/arg.py +12 -10
- {pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/METADATA +1 -1
- {pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/RECORD +14 -14
- {pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/LICENSE +0 -0
- {pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/WHEEL +0 -0
- {pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/top_level.txt +0 -0
pycoze/bot/agent/agent_types/__init__.py
CHANGED
@@ -1,10 +1,5 @@
-from .openai_func_call_agent import (
-    create_openai_func_call_agent_executor
-)
+from .openai_func_call_agent import create_openai_func_call_agent_executor
 from .react_agent import create_react_agent_executor
 
 
-__all__ = [
-    create_openai_func_call_agent_executor,
-    create_react_agent_executor
-]
+__all__ = [create_openai_func_call_agent_executor, create_react_agent_executor]
pycoze/bot/agent/agent_types/openai_func_call_agent.py
CHANGED
@@ -4,22 +4,23 @@ import json
 from langchain.tools import BaseTool
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from langchain_core.language_models.base import LanguageModelLike
-from langchain_core.messages import
+from langchain_core.messages import SystemMessage, ToolMessage
 from langgraph.graph import END
 from langgraph.graph.message import MessageGraph
 from langgraph.prebuilt import ToolExecutor, ToolInvocation
 from typing import Any
 
 
-def create_openai_func_call_agent_executor(tools: list[BaseTool], llm: LanguageModelLike, system_message: str, **kwargs):
-
+def create_openai_func_call_agent_executor(
+    tools: list[BaseTool], llm: LanguageModelLike, system_message: str, **kwargs
+):
 
     async def _get_messages(messages):
         msgs = []
         for m in messages:
             if isinstance(m, ToolMessage):
                 _dict = m.dict()
-                _dict['content'] = str(_dict['content'])
+                _dict["content"] = str(_dict["content"])
                 m_c = ToolMessage(**_dict)
                 msgs.append(m_c)
             else:
@@ -50,43 +51,47 @@ def create_openai_func_call_agent_executor(tools: list[BaseTool], llm: LanguageM
         # Based on the continue condition
         # we know the last message involves a FuncCall
         last_message = messages[-1]
-        for tool_call in last_message.additional_kwargs['tool_calls']:
-            function = tool_call['function']
-            function_name = function['name']
-            _tool_input = json.loads(function['arguments'] or '{}')
+        for tool_call in last_message.additional_kwargs["tool_calls"]:
+            function = tool_call["function"]
+            function_name = function["name"]
+            _tool_input = json.loads(function["arguments"] or "{}")
             # We construct an ToolInvocation from the function_call
-            actions.append(ToolInvocation(
-                tool=function_name,
-                tool_input=_tool_input,
-            ))
+            actions.append(
+                ToolInvocation(
+                    tool=function_name,
+                    tool_input=_tool_input,
+                )
+            )
         # We call the tool_executor and get back a response
         responses = await tool_executor.abatch(actions, **kwargs)
         # We use the response to create a ToolMessage
         tool_messages = [
             ToolMessage(
-                tool_call_id=tool_call['id'],
+                tool_call_id=tool_call["id"],
                 content=response,
-                additional_kwargs={'name': tool_call['function']['name']},
+                additional_kwargs={"name": tool_call["function"]["name"]},
+            )
+            for tool_call, response in zip(
+                last_message.additional_kwargs["tool_calls"], responses
             )
-            for tool_call, response in zip(last_message.additional_kwargs['tool_calls'], responses)
         ]
         return tool_messages
 
     workflow = MessageGraph()
 
     # Define the two nodes we will cycle between
-    workflow.add_node('agent', agent)
-    workflow.add_node('action', call_tool)
+    workflow.add_node("agent", agent)
+    workflow.add_node("action", call_tool)
 
     # Set the entrypoint as `agent`
     # This means that this node is the first one called
-    workflow.set_entry_point('agent')
+    workflow.set_entry_point("agent")
 
     # We now add a conditional edge
     workflow.add_conditional_edges(
        # First, we define the start node. We use `agent`.
        # This means these are the edges taken after the `agent` node is called.
-        'agent',
+        "agent",
        # Next, we pass in the function that will determine which node is called next.
        should_continue,
        # Finally we pass in a mapping.
@@ -97,15 +102,15 @@ def create_openai_func_call_agent_executor(tools: list[BaseTool], llm: LanguageM
        # Based on which one it matches, that node will then be called.
        {
            # If `tools`, then we call the tool node.
-            'continue': 'action',
+            "continue": "action",
            # Otherwise we finish.
-            'end': END,
+            "end": END,
        },
    )
 
    # We now add a normal edge from `tools` to `agent`.
    # This means that after `tools` is called, `agent` node is called next.
-    workflow.add_edge('action', 'agent')
+    workflow.add_edge("action", "agent")
 
    # Finally, we compile it!
    # This compiles it into a LangChain Runnable,
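Note: the hunk above reworks how OpenAI-style tool calls are unpacked before execution. A minimal standalone sketch of the data shapes involved; the dict layout follows the OpenAI tool-calling format, and the values are invented for illustration, not taken from pycoze:

    import json

    tool_call = {
        "id": "call_123",  # illustrative id
        "function": {"name": "search", "arguments": '{"query": "pycoze"}'},
    }
    function = tool_call["function"]
    tool_input = json.loads(function["arguments"] or "{}")  # falls back to {} when arguments is empty
    # The executor wraps this as ToolInvocation(tool=function["name"], tool_input=tool_input)
    # and the tool's result is returned as ToolMessage(tool_call_id=tool_call["id"], ...).
    print(function["name"], tool_input)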
pycoze/bot/agent/agent_types/react_agent.py
CHANGED
@@ -17,7 +17,7 @@ def create_react_agent_executor(
     tools: list[BaseTool],
     llm: LanguageModelLike,
     system_message: str,
-    **kwargs
+    **kwargs # ignore
 ):
     prompt = react_agent_prompt.partial(assistant_message=system_message)
     agent = create_structured_chat_agent(llm, tools, prompt)
pycoze/bot/agent/agent_types/react_prompt.py
CHANGED
@@ -3,9 +3,16 @@ from langchain_core.prompts.chat import (
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
     SystemMessagePromptTemplate,
-    MessagesPlaceholder
+    MessagesPlaceholder,
+)
+from langchain_core.messages import (
+    FunctionMessage,
+    SystemMessage,
+    ToolMessage,
+    AIMessage,
+    HumanMessage,
+    ChatMessage,
 )
-from langchain_core.messages import FunctionMessage, SystemMessage, ToolMessage, AIMessage, HumanMessage, ChatMessage
 
 
 system_temp = """
@@ -56,12 +63,29 @@ Thought: {agent_scratchpad}
 
 
 react_agent_prompt = ChatPromptTemplate(
-    input_variables=[
-
-
+    input_variables=[
+        "agent_scratchpad",
+        "input",
+        "tool_names",
+        "tools",
+        "assistant_message",
+    ],
+    optional_variables=["chat_history"],
+    input_types={
+        "chat_history": List[
+            Union[
+                AIMessage,
+                HumanMessage,
+                ChatMessage,
+                SystemMessage,
+                FunctionMessage,
+                ToolMessage,
+            ]
+        ]
+    },
     messages=[
         SystemMessagePromptTemplate.from_template(system_temp),
-        MessagesPlaceholder(variable_name=
-        HumanMessagePromptTemplate.from_template(human_temp)
-    ]
-)
+        MessagesPlaceholder(variable_name="chat_history", optional=True),
+        HumanMessagePromptTemplate.from_template(human_temp),
+    ],
+)
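Note: the rebuilt prompt declares chat_history as optional. A minimal sketch, assuming a made-up prompt text rather than the package's template, of how an optional MessagesPlaceholder behaves:

    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "{assistant_message}"),
            MessagesPlaceholder(variable_name="chat_history", optional=True),
            ("human", "{input}"),
        ]
    )
    # With optional=True the placeholder contributes no messages when
    # chat_history is not supplied, instead of raising a missing-variable error.
    print(prompt.format_messages(assistant_message="You are helpful.", input="hi"))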
pycoze/bot/base.py
CHANGED
@@ -6,8 +6,10 @@ from langchain.agents import tool as _tool
 import types
 import langchain_core
 
+
 def wrapped_tool(tool, module_path):
     old_tool_fun = tool.func
+
     def _wrapped_tool(*args, **kwargs):
         print(f"调用了{tool.name}")
         old_path = os.getcwd()
@@ -20,6 +22,7 @@ def wrapped_tool(tool, module_path):
         os.chdir(old_path)
         print(f"{tool.name}调用完毕,结果为:", result)
         return result
+
     return _wrapped_tool
 
 
@@ -43,8 +46,12 @@ def import_tools(tool_id):
     export_tools = getattr(module, "export_tools")
     temp_list = []
     for tool in export_tools:
-        assert isinstance(tool, langchain_core.tools.StructuredTool) or isinstance(tool, types.FunctionType), f"Tool is not a StructuredTool or function: {tool}"
-        if isinstance(tool, types.FunctionType) and not isinstance(tool, langchain_core.tools.StructuredTool):
+        assert isinstance(tool, langchain_core.tools.StructuredTool) or isinstance(
+            tool, types.FunctionType
+        ), f"Tool is not a StructuredTool or function: {tool}"
+        if isinstance(tool, types.FunctionType) and not isinstance(
+            tool, langchain_core.tools.StructuredTool
+        ):
             temp_list.append(_tool(tool))
     export_tools = temp_list
 
@@ -71,9 +78,9 @@ def import_tools(tool_id):
 
 def read_arg(param: str, is_path=False):
     parser = argparse.ArgumentParser()
-    parser.add_argument(param, nargs='?', help=f'Parameter {param}')
+    parser.add_argument(param, nargs="?", help=f"Parameter {param}")
     args = parser.parse_args()
-    value = getattr(args, param.lstrip('-'))
+    value = getattr(args, param.lstrip("-"))
     # 如果是路径并且有引号,去掉引号
     if is_path and value and value.startswith('"') and value.endswith('"'):
         value = value[1:-1]
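Note: wrapped_tool above saves the current working directory, switches it while the tool's original function runs, and restores it afterwards. A rough standalone sketch of that pattern with illustrative names; unlike the package's version it uses try/finally instead of printing around the call:

    import os

    def run_in_module_dir(fn, module_path):
        def _wrapped(*args, **kwargs):
            old_path = os.getcwd()
            os.chdir(os.path.dirname(os.path.abspath(module_path)))
            try:
                return fn(*args, **kwargs)
            finally:
                os.chdir(old_path)  # restore the working directory even if the tool raises
        return _wrapped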
pycoze/bot/bot.py
CHANGED
@@ -3,18 +3,20 @@ from langchain_openai import ChatOpenAI
 from .base import import_tools
 from .agent import run_agent, Runnable, INPUT_MESSAGE, output
 import asyncio
-from langchain_core.messages import HumanMessage
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
 from pycoze import utils
 
 
 params = utils.arg.read_params()
 llm_file = params["appPath"] + "/JsonStorage/llm.json"
 
-def load_role_setting(bot_setting_file:str):
+
+def load_role_setting(bot_setting_file: str):
     with open(bot_setting_file, "r", encoding="utf-8") as f:
         return json.load(f)
 
-def load_tools(bot_setting_file:str):
+
+def load_tools(bot_setting_file: str):
     with open(bot_setting_file, "r", encoding="utf-8") as f:
         role_setting = json.load(f)
 
@@ -24,31 +26,40 @@ def load_tools(bot_setting_file:str):
     return tools
 
 
-
-
-def chat(bot_setting_file:str):
-    history = []
+def chat(bot_setting_file: str):
 
     while True:
-
+        input_text = input()
         role_setting = load_role_setting(bot_setting_file)
         tools = load_tools(bot_setting_file)
-        if not
+        if not input_text.startswith(INPUT_MESSAGE):
             raise ValueError("Invalid message")
-
-        print("user:", message)
+        messages = json.loads(input_text[len(INPUT_MESSAGE) :])
 
         with open(llm_file, "r", encoding="utf-8") as f:
             cfg = json.load(f)
-        chat = ChatOpenAI(
+        chat = ChatOpenAI(
+            api_key=cfg["apiKey"],
+            base_url=cfg["baseURL"],
+            model=cfg["model"],
+            temperature=role_setting["temperature"],
+        )
 
-
-
-
-
-
-
-        history
+        agent = Runnable(
+            agent_execution_mode="FuncCall", # 'FuncCall' or 'ReAct',大模型支持FuncCall的话就用FuncCall
+            tools=tools,
+            llm=chat,
+            assistant_message=role_setting["prompt"],
+        )
+        history = []
+        for message in messages:
+            if message["role"] == "assistant":
+                history += [AIMessage(content=message["content"])]
+            elif message["role"] == "user":
+                history += [HumanMessage(content=message["content"])]
+            elif message["role"] == "system":
+                history += [SystemMessage(content=message["content"])]
+            else:
+                raise ValueError("Invalid message")
         result = asyncio.run(run_agent(agent, history))
         output("assistant", result, history)
-
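Note: the new chat() loop maps incoming {"role": ..., "content": ...} dicts onto LangChain message objects before running the agent. A condensed sketch of that mapping; the helper name is illustrative:

    from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

    def to_history(messages):
        mapping = {"assistant": AIMessage, "user": HumanMessage, "system": SystemMessage}
        history = []
        for message in messages:
            if message["role"] not in mapping:
                raise ValueError("Invalid message")
            history.append(mapping[message["role"]](content=message["content"]))
        return history

    print(to_history([{"role": "user", "content": "hello"}]))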
pycoze/gpu/gpu_reserve.py
CHANGED
@@ -4,6 +4,7 @@ import atexit
 import time
 import os
 import psutil
+import sys
 
 try:
     from pycoze.utils import utils
@@ -99,12 +100,15 @@ def reserve_gpu(gb):
         return False
 
 
-def reserve_gpu_retry(gb, retry=
+def reserve_gpu_retry(gb, retry=None):
+    if retry is None:
+        # 接近无限重试,python中允许无限大的整数,尽管sys.maxsize不是真正的无限大,但足够大
+        retry = sys.maxsize
     for i in range(retry):
         time.sleep(1)
+        if i % 10 == 0 or i < 10:
+            print(f"重试第{i}次")
         if reserve_gpu(gb):
-            if i % 10 == 0 or i < 10:
-                print(f"重试第{i}次")
             return True
     return False
 
@@ -158,10 +162,12 @@ initialize_and_check()
 atexit.register(cleanup)
 
 if __name__ == "__main__":
-    if reserve_gpu_retry(
-        print("GPU资源预留成功")
-
-
-
+    if reserve_gpu_retry(5):
+        print("(1)GPU资源预留成功")
+        if reserve_gpu_retry(5):
+            print("(2)GPU资源预留成功")
+        time.sleep(100)
+        release_gpu()
+        print("GPU资源释放成功")
     else:
         print("GPU资源不足,无法预留")
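Note: reserve_gpu_retry now treats retry=None as "retry indefinitely" by substituting sys.maxsize. A standalone sketch of the same idea with a generic check() callable in place of the GPU probe:

    import sys
    import time

    def retry_until(check, retry=None):
        if retry is None:
            retry = sys.maxsize  # large enough to behave like an unbounded retry loop
        for i in range(retry):
            time.sleep(1)
            if i < 10 or i % 10 == 0:
                print(f"attempt {i}")  # progress log, throttled after the first ten tries
            if check():
                return True
        return False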
pycoze/utils/__init__.py
CHANGED
pycoze/utils/arg.py
CHANGED
@@ -2,24 +2,26 @@ import argparse
 import sys
 import json
 
+
 def read_arg(param, is_path=False):
     args = sys.argv[1:]
     parser = argparse.ArgumentParser()
-    parser.add_argument(param, nargs='?', help=f'Parameter {param}')
+    parser.add_argument(param, nargs="?", help=f"Parameter {param}")
     args = parser.parse_known_args(args)[0]
-    value = getattr(args, param.lstrip('-'))
+    value = getattr(args, param.lstrip("-"))
     # 如果是路径并且有引号,去掉引号
     if is_path and value and value.startswith('"') and value.endswith('"'):
         value = value[1:-1]
 
     return value
 
+
 def read_params():
-
-
-
-
-
-
-
-
+    params_file = read_arg("params_file", True)
+    params = None
+    try:
+        with open(params_file, "r", encoding="utf-8") as f:
+            params = json.load(f)
+    except Exception as e:
+        print(e)
+    return params
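Note: the rewritten read_params() resolves a params_file command-line argument via read_arg(..., is_path=True), which also strips surrounding quotes, and then loads that JSON file. A small sketch of the consuming side; the file name is illustrative, though bot.py does read params["appPath"]:

    import json

    # Assume read_arg("params_file", True) resolved to this illustrative path.
    params_file = "params.json"
    with open(params_file, "r", encoding="utf-8") as f:
        params = json.load(f)
    print(params.get("appPath"))  # bot.py builds its llm.json path from this key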
{pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/RECORD
CHANGED
@@ -1,28 +1,28 @@
 pycoze/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pycoze/module.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pycoze/bot/__init__.py,sha256=pciDtfcIXda7iFt9uI5Fpm0JKpGBhdXHmJv4966WTVU,21
-pycoze/bot/base.py,sha256=
-pycoze/bot/bot.py,sha256=
+pycoze/bot/base.py,sha256=GWYDVGGtiCpk6gv-163cAbDid_IsnMe5jTj7eZUMJQU,2679
+pycoze/bot/bot.py,sha256=c8xje38_BMf4RgkWNDDuGBelHHszosMdYZevGD8dk-c,2225
 pycoze/bot/agent/__init__.py,sha256=IaYqQCJ3uBor92JdOxI_EY4HtYOHgej8lijr3UrN1Vc,161
 pycoze/bot/agent/agent.py,sha256=vDbTCorUL6Eh2Az4uzwGsLa3Hp4EPcOkq62JpQXt8-s,3435
 pycoze/bot/agent/assistant.py,sha256=QLeWaPi415P9jruYOm8qcIbC94cXXAhJYmLTkyC9NTQ,1267
 pycoze/bot/agent/chat.py,sha256=C2X0meUcIPbn5FCxvhkhxozldPG7qdb2jVR-WnPqqnQ,791
-pycoze/bot/agent/agent_types/__init__.py,sha256=
-pycoze/bot/agent/agent_types/openai_func_call_agent.py,sha256=
-pycoze/bot/agent/agent_types/react_agent.py,sha256=
-pycoze/bot/agent/agent_types/react_prompt.py,sha256=
+pycoze/bot/agent/agent_types/__init__.py,sha256=W2jTNMLqUMqgCMG0Tw0d8n7WpsbsnIonqaPR-YLegLU,210
+pycoze/bot/agent/agent_types/openai_func_call_agent.py,sha256=LBCkcPbFHpI9ijrze2X15O-9tXxCbi7IKbJRtrBQjwg,4504
+pycoze/bot/agent/agent_types/react_agent.py,sha256=AnjHwHXVwLAm77ndglJGi4rQhqDGWaLuUfl46uZVSzM,6749
+pycoze/bot/agent/agent_types/react_prompt.py,sha256=jyovokGaPzNIe5bvTRvn0gmsWLx5kpDIPmRwmEMCl-M,2142
 pycoze/gpu/__init__.py,sha256=cuxwDdz2Oo-VcwZ50FtFtEIJXdqoz2el-n0QpSt_NMc,75
-pycoze/gpu/gpu_reserve.py,sha256=
+pycoze/gpu/gpu_reserve.py,sha256=T-M1w8DiQQNtyNVok9Fj9qXGfTs5mxJp-gw-rxs50es,5410
 pycoze/ui/__init__.py,sha256=CBBpypz4qDoqJkPmGqR2BroCOEIX4rZiJucRsANPnwo,448
 pycoze/ui/base.py,sha256=nXNXRTZ5Tl1AQp5nfjzLvOVzt_1nLSCn2IOyfxAN_fc,1471
 pycoze/ui/color.py,sha256=cT9Ib8uNzkOKxyW0IwVj46o4LwdB1xgNCj1_Rou9d_4,854
 pycoze/ui/typ.py,sha256=NpT0FrbHvByOszBZMFtroRp7I7pN-38tYz_zPOPejF4,1723
 pycoze/ui/ui_def.py,sha256=CNFYH8NC-WYmbceIPpxsRr9H6O006pMKukx7U-BOE1Q,3744
-pycoze/utils/__init__.py,sha256=
-pycoze/utils/arg.py,sha256=
+pycoze/utils/__init__.py,sha256=KExBkotf23dr2NfTEouWke5nJB1q2IuDXgHrmuyd95k,73
+pycoze/utils/arg.py,sha256=rRujm1zKc0XlnNlpIJ6JAAaFiTzDGmL_RliIpSc5OD8,724
 pycoze/utils/text_or_file.py,sha256=gpxZVWt2DW6YiEg_MnMuwg36VNf3TX383QD_1oZNB0Y,551
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
-pycoze-0.1.
+pycoze-0.1.31.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
+pycoze-0.1.31.dist-info/METADATA,sha256=uUXKqs4ukQy4dj2MHEabHQPWu8IdK-sckxW0R4GV5kY,719
+pycoze-0.1.31.dist-info/WHEEL,sha256=UvcQYKBHoFqaQd6LKyqHw9fxEolWLQnlzP0h_LgJAfI,91
+pycoze-0.1.31.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
+pycoze-0.1.31.dist-info/RECORD,,
{pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/LICENSE
File without changes
{pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/WHEEL
File without changes
{pycoze-0.1.29.dist-info → pycoze-0.1.31.dist-info}/top_level.txt
File without changes