pycoze 0.1.285__py3-none-any.whl → 0.1.288__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pycoze/ai/llm/chat.py CHANGED
@@ -1,131 +1,124 @@
- from pycoze import utils
- import json5
- import re
- from retrying import retry
- from openai import OpenAI
- import openai
- import requests
- import os
-
-
- def never_retry_on_rate_limit_error(exception):
-     """Return True if we should retry (in this case when it's NOT a RateLimitError), False otherwise"""
-     return not isinstance(exception, openai.RateLimitError)
-
- @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
- def chat(user_text, history, temperature=0.2, stop=None, **kwargs):
-     user_msg = {"role": "user", "content": user_text}
-     cfg = utils.read_json_file("llm.json")
-
-     base_url = cfg["baseURL"]
-     api_key = cfg["apiKey"]
-     model = cfg["model"]
-
-     base_url = base_url.replace("/chat/completions", "")
-
-     client = OpenAI(api_key=api_key, base_url=base_url)
-
-     response = client.chat.completions.create(model=model,
-                                                messages=simple_history(history) + [user_msg],
-                                                temperature=temperature,
-                                                stop=stop)
-     return response.choices[0].message.content
-
-
- @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
- def chat_stream(user_text, history, temperature=0.2, stop=None, **kwargs):
-     user_msg = {"role": "user", "content": user_text}
-     cfg = utils.read_json_file("llm.json")
-
-     base_url = cfg["baseURL"]
-     api_key = cfg["apiKey"]
-     model = cfg["model"]
-
-     base_url = base_url.replace("/chat/completions", "")
-
-     client = OpenAI(api_key=api_key, base_url=base_url)
-
-
-     stream = client.chat.completions.create(model=model,
-                                              messages=simple_history(history) + [user_msg],
-                                              stream=True,
-                                              temperature=temperature,
-                                              stop=stop)
-     for chunk in stream:
-         yield chunk.choices[0].delta.content or ""
-
-
- def simple_history(history):
-     return [{"role": h["role"], "content": h["content"]} for h in history]
-
-
- @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
- def extract(response_data, text: str, temperature=0, **kwargs):
-     """print(extract({"name": "lowercase"}, "hello XiaoMing"))"""
-     if isinstance(response_data, dict):
-         response_items = [[res, response_data[res]] for res in response_data]
-     else:
-         response_items = response_data
-
-     json_text = ""
-     for i, res in enumerate(response_items):
-         comma = "," if i != len(response_items) - 1 else ""
-         json_text += f' "{res[0]}": {res[1]}{comma}\n'
-
-     # Combine the provided text with the formatted JSON schema
-     chat_text = f"""
- The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```" tags:
- ```json
- {{
- {json_text}
- }}
- ```
-
- Request:
- {text}
- """
-     # Put the request text last so that, in translation-style requests, phrases like "The output should" are not translated along with it, which would break parsing
-     markdown = chat(chat_text, [], temperature=temperature, **kwargs)
-     pattern = r'```json(.*?)```'
-     matches = re.findall(pattern, markdown, re.DOTALL)
-     if matches:
-         json_str = matches[0].strip()
-         # lines = [line.split("//")[0] for line in json_str.split("\n")]  # breaks when the JSON itself contains "//", e.g. https://
-         json_dict = json5.loads(json_str)
-         for item in response_items:
-             if item[0] not in json_dict:
-                 raise "item:" + item + " not exists"
-         return json_dict
-
-
- def yes_or_no(question, temperature=0, **kwargs):
-     result = extract([("Result", "Yes or No")], question,
-                      temperature=temperature, **kwargs)["Result"]
-     if isinstance(result, bool):
-         return result
-     return result.upper() == "YES"
-
-
- @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
- def extract_code(text: str, temperature=0, language="python", markdown_word='python', **kwargs):
-     """print(extract_code("sum 1~100"))"""
-     chat_text = text + f"""
- The output should be a complete and usable {language} code snippet, including the leading and trailing "```{markdown_word}" and "```":
- """
-     markdown = chat(chat_text, [], temperature=temperature, **kwargs)
-     # Use a regular expression to match the text between ```{markdown_word} and ```
-     pattern = rf'```{markdown_word}(.*?)```'
-     matches = re.findall(pattern, markdown, re.DOTALL)
-     if matches:
-         # Strip any leading/trailing whitespace
-         return matches[0].strip()
-     else:
-         raise Exception("The string is not a valid python code.")
-
-
- if __name__ == "__main__":
-     print(chat("你好", []))
-     for chunk in chat_stream("你好", []):
-         print(chunk)
-     print(extract({"name": "lowercase"}, "hello XiaoMing"))
-     print(extract_code("sum 1~100"))
+ from pycoze import utils
+ import json5
+ import re
+ from retrying import retry
+ from openai import OpenAI
+ import openai
+
+
+ def never_retry_on_rate_limit_error(exception):
+     """Return True if we should retry (in this case when it's NOT a RateLimitError), False otherwise"""
+     return not isinstance(exception, openai.RateLimitError)
+
+ @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
+ def chat(messages, temperature=0.2, stop=None, **kwargs):
+     cfg = utils.read_json_file("llm.json")
+
+     base_url = cfg["baseURL"]
+     api_key = cfg["apiKey"]
+     model = cfg["model"]
+
+     base_url = base_url.replace("/chat/completions", "")
+
+     client = OpenAI(api_key=api_key, base_url=base_url)
+
+     response = client.chat.completions.create(model=model,
+                                                messages=messages,
+                                                temperature=temperature,
+                                                stop=stop)
+     return response.choices[0].message.content
+
+
+ @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=5)
+ def chat_stream(user_text, history, temperature=0.2, stop=None, **kwargs):
+     user_msg = {"role": "user", "content": user_text}
+     cfg = utils.read_json_file("llm.json")
+
+     base_url = cfg["baseURL"]
+     api_key = cfg["apiKey"]
+     model = cfg["model"]
+
+     base_url = base_url.replace("/chat/completions", "")
+
+     client = OpenAI(api_key=api_key, base_url=base_url)
+
+
+     stream = client.chat.completions.create(model=model,
+                                              messages=simple_history(history) + [user_msg],
+                                              stream=True,
+                                              temperature=temperature,
+                                              stop=stop)
+     for chunk in stream:
+         yield chunk.choices[0].delta.content or ""
+
+
+ @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
+ def extract(response_data, text: str, temperature=0, **kwargs):
+     """print(extract({"name": "lowercase"}, "hello XiaoMing"))"""
+     if isinstance(response_data, dict):
+         response_items = [[res, response_data[res]] for res in response_data]
+     else:
+         response_items = response_data
+
+     json_text = ""
+     for i, res in enumerate(response_items):
+         comma = "," if i != len(response_items) - 1 else ""
+         json_text += f' "{res[0]}": {res[1]}{comma}\n'
+
+     # Combine the provided text with the formatted JSON schema
+     chat_text = f"""
+ The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```" tags:
+ ```json
+ {{
+ {json_text}
+ }}
+ ```
+
+ Request:
+ {text}
+ """
+     # Put the request text last so that, in translation-style requests, phrases like "The output should" are not translated along with it, which would break parsing
+     markdown = chat([{"role": "user", "content": chat_text}], temperature=temperature, **kwargs)
+     pattern = r'```json(.*?)```'
+     matches = re.findall(pattern, markdown, re.DOTALL)
+     if matches:
+         json_str = matches[0].strip()
+         # lines = [line.split("//")[0] for line in json_str.split("\n")]  # breaks when the JSON itself contains "//", e.g. https://
+         json_dict = json5.loads(json_str)
+         for item in response_items:
+             if item[0] not in json_dict:
+                 raise "item:" + item + " not exists"
+         return json_dict
+
+
+ def yes_or_no(question, temperature=0, **kwargs):
+     result = extract([("Result", "Yes or No")], question,
+                      temperature=temperature, **kwargs)["Result"]
+     if isinstance(result, bool):
+         return result
+     return result.upper() == "YES"
+
+
+ @retry(retry_on_exception=never_retry_on_rate_limit_error, wait_exponential_multiplier=500, stop_max_attempt_number=3)
+ def extract_code(text: str, temperature=0, language="python", markdown_word='python', **kwargs):
+     """print(extract_code("sum 1~100"))"""
+     chat_text = text + f"""
+ The output should be a complete and usable {language} code snippet, including the leading and trailing "```{markdown_word}" and "```":
+ """
+     markdown = chat([{"role": "user", "content": chat_text}], temperature=temperature, **kwargs)
+     # Use a regular expression to match the text between ```{markdown_word} and ```
+     pattern = rf'```{markdown_word}(.*?)```'
+     matches = re.findall(pattern, markdown, re.DOTALL)
+     if matches:
+         # Strip any leading/trailing whitespace
+         return matches[0].strip()
+     else:
+         raise Exception("The string is not a valid python code.")
+
+
+ if __name__ == "__main__":
+     print(chat([{"role": "user", "content": "你好"}]))
+     for chunk in chat_stream([{"role": "user", "content": "你好"}]):
+         print(chunk)
+     print(extract({"name": "lowercase"}, "hello XiaoMing"))
+     print(extract_code("sum 1~100"))
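The main behavioural change in chat.py is the chat() signature: 0.1.285 took user_text plus a history list and assembled the message list itself, while 0.1.288 takes a ready-made OpenAI-style messages list (extract() and extract_code() now wrap their prompt in a single-message list, as the __main__ block above shows). A minimal calling sketch under that reading; the history contents here are hypothetical, and llm.json is assumed to carry the baseURL/apiKey/model keys read in the diff:

    # Hedged usage sketch for the 0.1.288 API: the caller now assembles the message list.
    from pycoze.ai.llm.chat import chat, extract, extract_code

    history = [  # hypothetical earlier turns
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello, how can I help?"},
    ]
    reply = chat(history + [{"role": "user", "content": "Summarize our conversation."}])

    fields = extract({"name": "lowercase"}, "hello XiaoMing")  # dict parsed from the model's ```json block
    code = extract_code("sum 1~100")                           # body of the model's ```python block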
pycoze/bot/agent/agent_types/openai_func_call_agent.py CHANGED
@@ -1,6 +1,5 @@
  # reference:https://github.com/maxtheman/opengpts/blob/d3425b1ba80aec48953a327ecd9a61b80efb0e69/backend/app/agent_types/openai_agent.py
  import json
-
  from langchain.tools import BaseTool
  from langchain_core.utils.function_calling import convert_to_openai_tool
  from langchain_core.language_models.base import LanguageModelLike
@@ -13,6 +12,7 @@ import json
  import random
  from .const import HumanToolString

+
  def get_all_markdown_json(content):
      # Find all markdown json blocks
      markdown_json_blocks = re.findall(r"```json(.*?)```", content, re.DOTALL)
@@ -107,8 +107,8 @@ def create_openai_func_call_agent_executor(
          last_message.content = last_message.content + "\n\n"  # so it does not disrupt reading
          tools = get_tools(last_message)
          if last_message.tool_calls or tools:
-             return 'continue'
-         return 'end'
+             return "continue"
+         return "end"

      # Define the function to execute tools
      async def call_tool(messages):
@@ -140,9 +140,7 @@ def create_openai_func_call_agent_executor(
          responses = await tool_executor.abatch(actions, **kwargs)
          # We use the response to create a ToolMessage
          tool_messages = []
-         for tool_call, response in zip(
-             get_tools(last_message), responses
-         ):
+         for tool_call, response in zip(get_tools(last_message), responses):
              if not isinstance(response, (str, int, float, bool, list, tuple)):
                  response = repr(
                      response
@@ -157,9 +155,7 @@ def create_openai_func_call_agent_executor(
          if tool_compatibility_mode:
              # HumanMessage
              tool_msgs_str = repr(tool_messages)
-             tool_messages = [
-                 HumanMessage(content=HumanToolString + tool_msgs_str)
-             ]
+             tool_messages = [HumanMessage(content=HumanToolString + tool_msgs_str)]
          return tool_messages

      workflow = MessageGraph()
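The hunks above are formatting-only (double quotes, collapsed line wraps); the routing behaviour is unchanged: the graph keeps executing tools while the last message carries tool calls or parseable tools, and stops otherwise. A standalone sketch of that decision, with a hypothetical Msg type standing in for the LangChain message object:

    # Illustrative sketch of the "continue"/"end" routing shown above; Msg is a stand-in type.
    from dataclasses import dataclass, field

    @dataclass
    class Msg:
        content: str = ""
        tool_calls: list = field(default_factory=list)

    def route(last_message: Msg, tools: list) -> str:
        # Loop back into tool execution while there is work to do, otherwise finish.
        if last_message.tool_calls or tools:
            return "continue"
        return "end"

    assert route(Msg(tool_calls=[{"name": "search"}]), []) == "continue"
    assert route(Msg(), []) == "end"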
pycoze/reference/tool.py CHANGED
@@ -1,15 +1,15 @@
- import sys
  import os
  import importlib
  from langchain.agents import tool as to_agent_tool
  import types
  import langchain_core
- from .lib import ChangeDirectoryAndPath, ModuleManager, wrapped_func
+ from .lib import ModuleManager, wrapped_func
  from pycoze import utils


  params = utils.params

+
  def ref_tools(tool_id, as_agent_tool=False, workspace_path=None):
      if workspace_path is None:
          workspace_path = params["workspacePath"]
@@ -27,7 +27,9 @@ def ref_tools(tool_id, as_agent_tool=False, workspace_path=None):
              export_tools = getattr(module, "export_tools")
              valid_tools = []
              for tool in export_tools:
-                 assert isinstance(tool, langchain_core.tools.StructuredTool) or isinstance(
+                 assert isinstance(
+                     tool, langchain_core.tools.StructuredTool
+                 ) or isinstance(
                      tool, types.FunctionType
                  ), f"Tool is not a StructuredTool or function: {tool}"
                  if not isinstance(tool, langchain_core.tools.StructuredTool):
pycoze/reference/workflow.py CHANGED
@@ -1,56 +1,59 @@
- import sys
- import os
- import importlib
- from langchain.agents import tool as to_agent_tool
- import types
- import langchain_core
- from .lib import ModuleManager, wrapped_func
- from pycoze import utils
-
-
- params = utils.params
-
- def _ref_workflows(workflow_id, as_agent_tool=False, workspace_path=None):
-     if workspace_path is None:
-         workspace_path = params["workspacePath"]
-     tool_base_path = os.path.join(workspace_path, "User/Local/workflow")
-     module_path = os.path.join(tool_base_path, workflow_id)
-     module_path = os.path.normpath(os.path.abspath(module_path))
-
-     if not os.path.exists(module_path):
-         print(f"Workflow {workflow_id} not found")
-         return []
-
-     try:
-         with ModuleManager(module_path) as manager:
-             module = importlib.import_module("tool")
-             export_tools = getattr(module, "export_tools")
-             valid_tools = []
-             for tool in export_tools:
-                 assert isinstance(tool, langchain_core.tools.StructuredTool) or isinstance(
-                     tool, types.FunctionType
-                 ), f"Tool is not a StructuredTool or function: {tool}"
-                 if not isinstance(tool, langchain_core.tools.StructuredTool):
-                     tool = to_agent_tool(tool)
-                 valid_tools.append(tool)
-             export_tools = valid_tools
-
-     except Exception as e:
-         print(f"Error loading workflow {workflow_id}: {e}")
-         return []
-
-     for tool in export_tools:
-         tool.func = wrapped_func(tool, module_path)
-         if tool.description is None:
-             tool.description = "This tool is used to " + tool.name + "."
-
-     return export_tools if as_agent_tool else [tool.func for tool in export_tools]
-
-
- def ref_workflow(workflow_id, as_agent_tool=False, workspace_path=None):
-     tools = _ref_workflows(workflow_id, as_agent_tool=as_agent_tool, workspace_path=workspace_path)
-     if len(tools) > 0:
-         return tools[0]
-     else:
-         return None
-
+ import os
+ import importlib
+ from langchain.agents import tool as to_agent_tool
+ import types
+ import langchain_core
+ from .lib import ModuleManager, wrapped_func
+ from pycoze import utils
+
+
+ params = utils.params
+
+
+ def _ref_workflows(workflow_id, as_agent_tool=False, workspace_path=None):
+     if workspace_path is None:
+         workspace_path = params["workspacePath"]
+     tool_base_path = os.path.join(workspace_path, "User/Local/workflow")
+     module_path = os.path.join(tool_base_path, workflow_id)
+     module_path = os.path.normpath(os.path.abspath(module_path))
+
+     if not os.path.exists(module_path):
+         print(f"Workflow {workflow_id} not found")
+         return []
+
+     try:
+         with ModuleManager(module_path) as manager:
+             module = importlib.import_module("tool")
+             export_tools = getattr(module, "export_tools")
+             valid_tools = []
+             for tool in export_tools:
+                 assert isinstance(
+                     tool, langchain_core.tools.StructuredTool
+                 ) or isinstance(
+                     tool, types.FunctionType
+                 ), f"Tool is not a StructuredTool or function: {tool}"
+                 if not isinstance(tool, langchain_core.tools.StructuredTool):
+                     tool = to_agent_tool(tool)
+                 valid_tools.append(tool)
+             export_tools = valid_tools
+
+     except Exception as e:
+         print(f"Error loading workflow {workflow_id}: {e}")
+         return []
+
+     for tool in export_tools:
+         tool.func = wrapped_func(tool, module_path)
+         if tool.description is None:
+             tool.description = "This tool is used to " + tool.name + "."
+
+     return export_tools if as_agent_tool else [tool.func for tool in export_tools]
+
+
+ def ref_workflow(workflow_id, as_agent_tool=False, workspace_path=None):
+     tools = _ref_workflows(
+         workflow_id, as_agent_tool=as_agent_tool, workspace_path=workspace_path
+     )
+     if len(tools) > 0:
+         return tools[0]
+     else:
+         return None
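tool.py and workflow.py share the same loader pattern: import the module's export_tools, assert each entry is a StructuredTool or a plain function, wrap plain functions with to_agent_tool, then bind wrapped_func(tool, module_path) to each tool. A hedged usage sketch; the ids and workspace path below are placeholders, not values from the package:

    # Hedged usage sketch for the reference helpers shown above.
    from pycoze.reference.tool import ref_tools
    from pycoze.reference.workflow import ref_workflow

    workspace = "/path/to/workspace"  # placeholder
    tools = ref_tools("my_tool_id", as_agent_tool=True, workspace_path=workspace)
    workflow_tool = ref_workflow("my_workflow_id", workspace_path=workspace)
    if workflow_tool is None:
        # ref_workflow returns the first exported tool, or None when loading fails
        print("workflow not found or failed to load")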
{pycoze-0.1.285.dist-info → pycoze-0.1.288.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pycoze
- Version: 0.1.285
+ Version: 0.1.288
  Summary: Package for pycoze only!
  Home-page: UNKNOWN
  Author: Yuan Jie Xiong
{pycoze-0.1.285.dist-info → pycoze-0.1.288.dist-info}/RECORD RENAMED
@@ -2,7 +2,7 @@ pycoze/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pycoze/ai/__init__.py,sha256=amfVImcuiRRnGbmOjIqjXKiuH1H3zZPHOrJue2dEB8U,355
  pycoze/ai/vram_reserve.py,sha256=DRxKzqf89fLAts0DzU8e19z9kecIF8OdMkQnJlCKZV0,4244
  pycoze/ai/llm/__init__.py,sha256=5_AnOrzXI2V6ZZsLUWW1v5ATRWmJy53JLN9jfSZQXCg,249
- pycoze/ai/llm/chat.py,sha256=izriC7nCp5qeJRqcUVQBVqTHiH6MJS77ROzGBJufdNI,5133
+ pycoze/ai/llm/chat.py,sha256=_aJo0KzqA2m4lLql-9AqTh9f6mJmwi80K_tam7F2ehU,4882
  pycoze/ai/llm/text_to_image_prompt.py,sha256=0bx2C_YRvjAo7iphHGp1-pmGKsKqwur7dM0t3SiA8kA,3398
  pycoze/ai/llm/think.py,sha256=sUgTBdGzcZtL3r-Wx8M3lDuVUmDVz8g3qC0VU8uiKAI,5143
  pycoze/api/__init__.py,sha256=GGRRRPop0ZxdXe5JRhg2XvHITGIWfNcHA25opJZ0f1w,313
@@ -19,12 +19,12 @@ pycoze/bot/agent/assistant.py,sha256=5LIgPIVVzx6uIOWT5S_XDDyPPjPHRBBNpIU3GiOkVHc
  pycoze/bot/agent/chat.py,sha256=9wZ24CPdSbSnPCWmCQJle05U5VlDGgZhZ9z1mezLst0,816
  pycoze/bot/agent/agent_types/__init__.py,sha256=zmU2Kmrv5mCdfg-QlPn2H6pWxbGeq8s7YTqLhpzJC6k,179
  pycoze/bot/agent/agent_types/const.py,sha256=BfUKPrhAHREoMLHuFNG2bCIEkC1-f7K0LEqNg4RwiRE,70
- pycoze/bot/agent/agent_types/openai_func_call_agent.py,sha256=6aKnUQDINyUaCW24oa9Qjkm5w3ctZ6lxAgcE4m9YHwE,6701
+ pycoze/bot/agent/agent_types/openai_func_call_agent.py,sha256=3qOyrddujtJ50W9SbH5bapbVTwjgE_LC2TnYJWUH9yc,6649
  pycoze/reference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pycoze/reference/bot.py,sha256=d5uB0OH1MF3LXGoBAF6vX6y50IV3sOrNi_5hXAHAmGU,2255
  pycoze/reference/lib.py,sha256=0xQJTLTHedGzQBsjuTFNBVqYc4-8Yl65gGCrAhWyOX8,2155
- pycoze/reference/tool.py,sha256=CYUS95M_XGnUcl4uWayjzKOM9jGpylS7vWJ66JfNTjI,1710
- pycoze/reference/workflow.py,sha256=LiubvvhGa6KoVIP8W8D8zTZaL595M4Dorsa_kxZ8UgQ,2028
+ pycoze/reference/tool.py,sha256=_dggSWn-oC_reB8TuNOPl48Xr-pgwJHF7XmzlIw6lsQ,1714
+ pycoze/reference/workflow.py,sha256=22y8yyJgBMcGShBbqKpUGFl2CM9dh646LsM8r-1UKcs,2013
  pycoze/ui/__init__.py,sha256=uaXet23wUk64TcZjpBX8qOx4aUhwA_ucrmcxy7Q4Qr4,929
  pycoze/ui/base.py,sha256=bz9mHZwIXA8LErEHTIonH347u6LP7rxV2EADMMjNZos,1081
  pycoze/ui/color.py,sha256=cT9Ib8uNzkOKxyW0IwVj46o4LwdB1xgNCj1_Rou9d_4,854
@@ -35,8 +35,8 @@ pycoze/utils/arg.py,sha256=jop1tBfe5hYkHW1NSpCeaZBEznkgguBscj_7M2dWfrs,503
  pycoze/utils/env.py,sha256=5pWlXfM1F5ZU9hhv1rHlDEanjEW5wf0nbyez9bNRqqA,559
  pycoze/utils/socket.py,sha256=bZbFFRH4mfThzRqt55BAAGQ6eICx_ja4x8UGGrUdAm8,2428
  pycoze/utils/text_or_file.py,sha256=gpxZVWt2DW6YiEg_MnMuwg36VNf3TX383QD_1oZNB0Y,551
- pycoze-0.1.285.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
- pycoze-0.1.285.dist-info/METADATA,sha256=Qb3Dk-FsO1-K9xMul-gH7ds6KoH56qCjAH2RpUJPNqY,755
- pycoze-0.1.285.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
- pycoze-0.1.285.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
- pycoze-0.1.285.dist-info/RECORD,,
+ pycoze-0.1.288.dist-info/LICENSE,sha256=QStd_Qsd0-kAam_-sOesCIp_uKrGWeoKwt9M49NVkNU,1090
+ pycoze-0.1.288.dist-info/METADATA,sha256=72pjOMZ7pPYKaJ7PLzFV-fa2zfNZkfCHuulwkvb2HOs,755
+ pycoze-0.1.288.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+ pycoze-0.1.288.dist-info/top_level.txt,sha256=76dPeDhKvOCleL3ZC5gl1-y4vdS1tT_U1hxWVAn7sFo,7
+ pycoze-0.1.288.dist-info/RECORD,,
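The RECORD lines above follow the standard wheel convention: each entry is path,sha256=<urlsafe-base64 digest with padding stripped>,size, so the new hashes for chat.py, tool.py and workflow.py can be re-derived from an unpacked 0.1.288 wheel. A small sketch of that check; the file path is a placeholder:

    # Recompute a RECORD-style hash: sha256 digest, urlsafe base64, trailing "=" removed.
    import base64
    import hashlib

    def record_hash(path: str) -> str:
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    print(record_hash("pycoze/ai/llm/chat.py"))  # compare with the RECORD entry above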