mlchat 1.0.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ recursive-include mlchat *
mlchat-1.0.8/PKG-INFO ADDED
@@ -0,0 +1,19 @@
1
+ Metadata-Version: 2.4
2
+ Name: mlchat
3
+ Version: 1.0.8
4
+ Summary: openai+mcp调用工具
5
+ Home-page: https://www.python.org
6
+ Author: mengling
7
+ Author-email: 1321443305@qq.com
8
+ Requires-Python: >=3.8
9
+ Requires-Dist: loguru
10
+ Requires-Dist: openai
11
+ Requires-Dist: mcp>=1.25.0
12
+ Requires-Dist: pydantic
13
+ Requires-Dist: json-repair
14
+ Dynamic: author
15
+ Dynamic: author-email
16
+ Dynamic: home-page
17
+ Dynamic: requires-dist
18
+ Dynamic: requires-python
19
+ Dynamic: summary
@@ -0,0 +1,256 @@
1
+ import asyncio
2
+ import anyio
3
+ from pydantic import BaseModel
4
+ import json
5
+ import warnings
6
+ from mcp import ClientSession, StdioServerParameters
7
+ from openai import AsyncOpenAI, NOT_GIVEN
8
+ from loguru import logger as _log
9
+ from typing import AsyncIterator
10
+ from contextlib import AsyncExitStack
11
+ from mcp.client.stdio import stdio_client
12
+ from mcp.client.sse import sse_client
13
+ from mcp.client.streamable_http import streamable_http_client
14
+ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk,ChoiceDeltaToolCall, ChoiceDeltaToolCallFunction
15
+ from .extension import DialogueMessager, aget_trace, aget_title, ToolCeil, Messager, Tooler, Server
16
+
17
+
18
class _ToolCallStream(BaseModel):
    """Accumulator that folds streamed tool-call deltas into one complete call."""
    id: str
    name: str
    arguments: str = ''

    def arg_add(self, data: ChoiceDeltaToolCall):
        """Merge one streamed delta.

        A delta carrying an ``id`` (re)starts the call: id/name/arguments are
        reset from it. A delta without an ``id`` is a continuation and only
        appends to ``arguments``.
        """
        if not data:
            return
        if data.id:
            self.id = data.id
            if data.function.name:
                self.name = data.function.name
            self.arguments = data.function.arguments or ''
        else:
            self.arguments += data.function.arguments or ''

    def to_cdtc(self, index: int) -> ChoiceDeltaToolCall:
        """Materialize the accumulated call; empty arguments become '{}'."""
        func = ChoiceDeltaToolCallFunction(arguments=self.arguments or '{}', name=self.name)
        return ChoiceDeltaToolCall(index=index, id=self.id, function=func, type='function')
36
+
37
class AchatIt:
    """Thin async-iterator wrapper over a Messager stream with collectors."""

    def __init__(self, ait: AsyncIterator[Messager]):
        self._it = ait

    def __aiter__(self):
        return self

    async def __anext__(self) -> Messager:
        return await self._it.__anext__()

    async def to_list(self, get_dict=False) -> list[Messager | dict]:
        """Drain the stream into a list; as plain dicts when get_dict is True."""
        collected = []
        async for mes in self:
            collected.append(mes.to_messager() if get_dict else mes)
        return collected

    async def first(self) -> Messager:
        """Return the first finished message; None if the stream ends without one."""
        async for mes in self:
            if mes.is_finish:
                return mes

    async def last(self) -> Messager:
        """Drain the stream and return its final message."""
        drained = await self.to_list()
        return drained[-1]
56
+
57
class MCPClient:
    """Manages connections to one or more MCP servers and indexes their tools.

    All sessions are owned by a single AsyncExitStack and torn down together
    in close(). Tools from every server are merged into one flat name map —
    NOTE(review): tool names are assumed globally unique; a tool with the same
    name on two servers silently overwrites the earlier entry.
    """

    def __init__(self, logger=None):
        # Fall back to the module-level loguru logger when none is supplied.
        self.logger = logger or _log
        self.exit_stack = AsyncExitStack()
        self.tool_map: dict[str, Tooler] = {}
        self.server_map: dict[str, Server] = {}

    async def _connect_to_server(self, server_name, session: ClientSession, strict: bool | None):
        """Initialize *session*, register it under *server_name*, index its tools."""
        await session.initialize()
        # BUG FIX: was Server(name=session, ...) — the session object was
        # stored as the server's name; it must be the server_name string.
        server = Server(name=server_name, cs=session, tools=[])
        self.server_map[server_name] = server
        # Refresh the tool mapping from the server's advertised tool list.
        response = await session.list_tools()
        for tool in response.tools:
            # Build the unified tool registry entry.
            tooler = Tooler(name=tool.name, server=server, tc=ToolCeil.from_mcp_tool(tool, strict=strict))
            self.tool_map[tool.name] = tooler
            server.tools.append(tooler)
            self.logger.debug(tooler.tc)
        self.logger.info(f"已连接到MCP服务器 - {server_name}\n{[tooler.name for tooler in server.tools]}")

    async def connect_to_stdio_server(self, server_name: str, command: str, *args: str, env: dict = None, strict: bool | None = None):
        """Connect over stdio by spawning *command args* as a subprocess."""
        server_params = StdioServerParameters(command=command, args=args, env=env)
        read_stream, write_stream = await self.exit_stack.enter_async_context(stdio_client(server_params))
        session = await self.exit_stack.enter_async_context(ClientSession(read_stream, write_stream))
        return await self._connect_to_server(server_name, session, strict)

    async def connect_to_http_server(self, server_name: str, url: str, strict: bool | None = None, **kwargs):
        """Connect over streamable HTTP transport."""
        read_stream, write_stream, _ = await self.exit_stack.enter_async_context(streamable_http_client(url, **kwargs))
        session = await self.exit_stack.enter_async_context(ClientSession(read_stream, write_stream))
        return await self._connect_to_server(server_name, session, strict)

    async def connect_to_sse_server(self, server_name: str, url: str, strict: bool | None = None, **kwargs):
        """Connect over SSE transport."""
        read_stream, write_stream = await self.exit_stack.enter_async_context(sse_client(url, **kwargs))
        session = await self.exit_stack.enter_async_context(ClientSession(read_stream, write_stream))
        return await self._connect_to_server(server_name, session, strict)

    async def connect_to_config(self, config_or_path: dict | str, strict: bool | None = None):
        """Connect to every server of an mcpServers-style config (dict or JSON file path)."""
        if isinstance(config_or_path, str):
            with open(config_or_path, encoding="utf-8") as f:
                config = json.load(f)
        else:
            config = config_or_path
        for server_name, server_config in config['mcpServers'].items():
            if server_config.get("command"):
                await self.connect_to_stdio_server(server_name, server_config["command"],
                                                   *server_config.get("args", []),
                                                   env=server_config.get("env"), strict=strict)
            elif server_config.get("url"):
                # Default transport is streamable HTTP unless type says 'sse'.
                if server_config.get("type", 'http').lower() == 'sse':
                    await self.connect_to_sse_server(server_name, server_config["url"], strict=strict)
                else:
                    await self.connect_to_http_server(server_name, server_config["url"], strict=strict)
            else:
                warnings.warn(f"未指定command或url, 无法连接到 MCP 服务器 {server_name}")

    def set_tool_description(self, name: str, description: str):
        """Override the advertised description of a registered tool."""
        self.tool_map[name].tc.description = description

    @property
    def tool_keys(self) -> list[str]:
        """Names of every registered tool."""
        return list(self.tool_map.keys())

    @property
    def tool_list(self) -> list[ToolCeil]:
        """Schema cells of every registered tool."""
        return [ter.tc for ter in self.tool_map.values()]

    async def call_tool(self, tool_name: str, args: dict | str):
        """Invoke a tool by name; *args* may be a dict or a JSON string.

        On anyio.ClosedResourceError the session is re-initialized once and
        the call retried.
        """
        tooler = self.tool_map[tool_name]
        args = json.loads(args) if isinstance(args, str) else args
        self.logger.info(f"tool: {tool_name} args: {args}")
        try:
            result = await tooler.server.cs.call_tool(tool_name, args)
        except anyio.ClosedResourceError:
            self.logger.info(f'{tool_name} 重新连接...')
            await tooler.server.cs.initialize()
            result = await tooler.server.cs.call_tool(tool_name, args)
        return result

    async def close(self):
        """Close all sessions and clear the registries."""
        await self.exit_stack.aclose()
        self.server_map.clear()
        self.tool_map.clear()
140
+
141
class MockUpClient(MCPClient):
    """MCPClient bound to an OpenAI-compatible chat endpoint.

    Streams model output and transparently executes any MCP tool calls the
    model requests, feeding results back into the conversation.
    """

    def __init__(self, base_url, model, api_key='EMPTY', logger=None):
        super().__init__(logger)
        self.model = model
        self.aclient = AsyncOpenAI(api_key=api_key, base_url=base_url)

    async def _call_tool(self, tool_call: ChoiceDeltaToolCall):
        """Execute one model-requested tool call; returns (id, name, args, result.content)."""
        tool_name = tool_call.function.name
        tool_args = json.loads(tool_call.function.arguments)
        # Route the call to the owning server via the tool-name registry.
        result = await self.call_tool(tool_name, tool_args)
        return tool_call.id, tool_name, tool_args, result.content

    def get_tool_choice(self, *tool_names: str):
        """Build a tool_choice payload; note some models do not support forcing one/many tools."""
        if not tool_names:
            return 'auto'
        elif len(tool_names) == 1:
            return {
                "type": "function",
                "function": {"name": tool_names[0]}
            }
        else:
            # 'allowed_tools' restricts the model to this set and requires one call.
            return {
                'type': 'allowed_tools',
                'allowed_tools': {
                    'mode': 'required',
                    'tools': [{"type": "function", "function": {"name": tool_name}} for tool_name in tool_names]
                }
            }

    def chat(self, messages: list[Messager | dict], max_tool_num=3, use_tool_name: str | list[str] = None, custom_tools: list[ToolCeil] = None, select_servers: list[str] = None, **kwargs) -> AchatIt:
        """Query the model and resolve returned tool calls, streaming output.

        Loops, calling tools the model requests, until no further tool calls
        are produced or the round limit is reached.

        Args:
            messages: conversation so far, as Messager objects or raw dicts.
            max_tool_num: maximum number of tool-call rounds. Defaults to 3.
            use_tool_name: tool name(s) forced on the FIRST round only; ignored if absent.
            custom_tools: explicit tool set; when given, the client's own tools are not offered.
            select_servers: server names whose tools are offered; only used when custom_tools is empty.

        Returns:
            AchatIt: custom async iterator yielding streamed Messager updates.
        """
        async def _chat():
            the_messages = []
            for message in messages:
                mer = Messager(**message) if isinstance(message, dict) else message
                the_messages.append(mer.to_messager())
            if custom_tools:
                tools = custom_tools
            else:
                tools = []
                for name in (select_servers or self.server_map.keys()):
                    ser = self.server_map.get(name)
                    if not ser: continue
                    tools.extend(ter.tc for ter in ser.tools)
            available_tools = [tool.to_tool() for tool in tools]
            # Tool-call loop: one extra iteration so the final answer is produced tool-free.
            for i in range(max_tool_num + 1):
                tool_choice = 'auto'
                # Past the limit, tools are withheld so the model must answer.
                if i < max_tool_num:
                    # Forced tool choice applies only on the first round.
                    if i == 0 and use_tool_name:
                        tool_choice = self.get_tool_choice(*([use_tool_name] if isinstance(use_tool_name, str) else use_tool_name))
                else:
                    available_tools = None
                # index -> accumulating tool-call stream for this round.
                tcdt: dict[int, _ToolCallStream] = {}
                chunk: ChatCompletionChunk
                message = Messager(role="assistant", is_finish=False)
                async for chunk in await self.aclient.chat.completions.create(
                    model=self.model,
                    messages=the_messages,
                    tools=available_tools,
                    tool_choice=tool_choice if available_tools else NOT_GIVEN,
                    **kwargs,
                    stream=True
                ):
                    if chunk.choices:
                        message.chunk = chunk.choices[0].delta.content or ''
                        tool_calls = chunk.choices[0].delta.tool_calls
                    else:
                        message.chunk, tool_calls = '', None
                    if tool_calls:
                        for tool_call in tool_calls:
                            if tcdt.get(tool_call.index) is None:
                                tcdt[tool_call.index] = _ToolCallStream(id=tool_call.id, name=tool_call.function.name)
                            tcdt[tool_call.index].arg_add(tool_call)
                    message.content += message.chunk
                    yield message
                message.tool_calls = [data.to_cdtc(index) for index, data in tcdt.items()] or None
                message.is_finish = True
                yield message
                the_messages.append(message.to_messager())
                # No tool calls means the model is done.
                if not message.tool_calls: break
                # Execute the requested tool calls concurrently and append
                # their results; only text return content is handled for now.
                for tool_call_id, tool_name, tool_args, rcs in await asyncio.gather(*[self._call_tool(tool_call) for tool_call in message.tool_calls]):
                    for rc in rcs:
                        tmessage = Messager(role="tool", content=rc.text, name=tool_name, args=tool_args, tool_call_id=tool_call_id, is_finish=True)
                        yield tmessage
                        the_messages.append(tmessage.to_messager())

        return AchatIt(_chat())

    async def get_summary_title(self, historys: list[DialogueMessager]) -> str:
        """Generate a short summary title from recent history ('' when history is empty)."""
        if not historys: return ''
        return await aget_title(self.aclient, self.model, historys)

    async def get_traces(self, historys: list[DialogueMessager], trace_num: int = 3) -> list[str]:
        """Generate follow-up questions from recent history ([] when history is empty)."""
        if not historys: return []
        return await aget_trace(self.aclient, self.model, historys, trace_num=trace_num)
@@ -0,0 +1,194 @@
1
+ from dataclasses import dataclass
2
+ from datetime import datetime
3
+ from typing import Literal, Self
4
+ from openai import AsyncOpenAI
5
+ from pydantic import BaseModel, field_validator, Field
6
+ from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
7
+ from mcp import ClientSession, Tool as _Tool
8
+ import json
9
+
10
+
11
class DialogueMessager(BaseModel):
    """Minimal dialogue turn: an assistant/user role plus text or rich content."""
    role: Literal['assistant', 'user']
    content: str | list[dict] = ''

    @field_validator('content', mode='before')
    def _strip(cls, value, data):
        # Normalize surrounding whitespace before validation; objects with a
        # `.text` attribute get that attribute stripped in place.
        if isinstance(value, str):
            return value.strip()
        if hasattr(value, 'text'):
            value.text = value.text.strip()
        return value
22
+
23
class Messager(DialogueMessager):
    """Full chat message covering all OpenAI roles, streaming state and attachments."""
    role: Literal['developer', 'system', 'assistant', 'user', 'tool']
    chunk: str | None = None           # latest streamed text delta
    name: str | None = None            # tool name (for role == 'tool')
    args: dict | list | None = None    # tool arguments echoed back with results
    tool_call_id: str | None = None    # id linking a tool result to its call
    tool_calls: list[ChoiceDeltaToolCall] | None = None
    is_finish: bool = True             # False while the message is still streaming
    time: str = Field(default_factory=lambda: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    # Attachments bucketed by kind; values are OpenAI content parts (or name/url dicts for 'file').
    file_map: dict[Literal['image', 'video', 'audio', 'file'], list[dict | str]] | None = None

    @property
    def is_tool(self) -> bool:
        return self.role == 'tool'

    @property
    def is_assistant(self):
        return self.role == 'assistant'

    @property
    def is_user(self):
        return self.role == 'user'

    @property
    def is_dialogue(self):
        """True for a non-empty user/assistant turn."""
        return bool(self.content and self.role in ('assistant', 'user'))

    @property
    def debug_log(self) -> str:
        """One-line summary for logging; long content is truncated to 100 chars.

        NOTE(review): assumes `content` is a str here — a list[dict] content
        would break `.split`; confirm callers only log text messages.
        """
        if self.role == 'assistant' and not self.content:
            log = '; '.join(f'{tool.function.name} {tool.function.arguments}' for tool in self.tool_calls)
        else:
            log = self.content.split('\n')
            if len(log) > 1 or len(log[0]) > 100:
                log = log[0][:100] + '...'
            else:
                log = log[0]
        return f"{self.role}: {log}"

    @property
    def tool_call_arguments(self) -> list[dict]:
        """Parsed argument dicts of every pending tool call."""
        return [json.loads(tool_call.function.arguments) for tool_call in (self.tool_calls or ())]

    def to_messager(self) -> dict:
        """Render as an OpenAI chat-API message dict.

        Attachments become content parts; non-media files are injected into
        the text as a markdown link list.
        """
        dt = self.model_dump(exclude_none=True, exclude={'content', 'chunk', 'is_finish', 'time', 'file_map'})
        if self.file_map:
            dt['content'] = [*self.file_map.get('image', ()), *self.file_map.get('video', ()), *self.file_map.get('audio', ())]
            content = self.content
            if self.file_map.get('file'):
                # BUG FIX: the original f-string reused the outer single quote
                # inside the replacement fields (f'[{f['name']}]...'), which is
                # a SyntaxError before Python 3.12 although the package
                # declares Requires-Python >= 3.8 — use double quotes inside.
                filetemplate = '\n- '.join(['## 上传其他文件列表', *[f"[{f['name']}]({f['url']})" for f in self.file_map['file']]])
                content = f'{filetemplate}\n\n{content}'
            dt['content'].append({'type': 'text', 'text': content })
        else:
            dt['content'] = self.content
        return dt

    def addFile(self, *, imgs: list[str | dict] = (), videos: list[str | dict] = (), audios: list[dict] = (), other_files: list[dict] = ()) -> Self:
        """Attach media/files to this message; returns self for chaining.

        Strings in imgs/videos are treated as URLs; other_files entries must
        carry 'name' and 'url' keys.
        """
        self.file_map = self.file_map or {}
        if imgs:
            self.file_map['image'] = self.file_map.get('image', [])
            self.file_map['image'].extend([{'type': 'image_url', 'image_url': { 'url': img } if isinstance(img, str) else img} for img in imgs])
        if videos:
            self.file_map['video'] = self.file_map.get('video', [])
            self.file_map['video'].extend([{'type': 'video_url', 'video_url': { 'url': video } if isinstance(video, str) else video} for video in videos])
        if audios:
            self.file_map['audio'] = self.file_map.get('audio', [])
            self.file_map['audio'].extend([{'type': 'input_audio', 'input_audio': audio} for audio in audios])
        if other_files:
            self.file_map['file'] = self.file_map.get('file', [])
            self.file_map['file'].extend([{'name': file['name'], 'url': file['url']} for file in other_files])
        return self
94
+
95
class ToolCeil(BaseModel):
    """Serializable description of a callable tool (OpenAI function schema)."""
    name: str
    description: str
    parameters: dict
    strict: bool | None = None

    def to_tool(self) -> dict:
        """Render as an entry for the OpenAI `tools` parameter."""
        function_spec = {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters,
            'strict': self.strict
        }
        return {"type": "function", "function": function_spec}

    @staticmethod
    def from_mcp_tool(tool: _Tool, strict: bool = None) -> 'ToolCeil':
        """Build from an MCP Tool.

        NOTE(review): assumes tool.description is not None — pydantic would
        reject None for the required str field; confirm upstream guarantees.
        """
        return ToolCeil(name=tool.name, description=tool.description, parameters=tool.inputSchema, strict=strict)

    @staticmethod
    def from_base(name: str, base: BaseModel, description: str = None, strict: bool = None) -> 'ToolCeil':
        """Build from a pydantic model's JSON schema.

        The explicit *description* wins; otherwise the schema's own docstring
        description is used. Raises ValueError when neither exists.
        """
        schema = base.model_json_schema()
        schema_description = schema.pop('description', None)
        description = description or schema_description
        if not description:
            raise ValueError('description is None')
        return ToolCeil(name=name, description=description.strip(), parameters=schema, strict=strict)
123
+
124
@dataclass
class Tooler:
    # One registered tool: its name, the server that owns it, and its schema cell.
    name: str
    server: 'Server'
    tc: ToolCeil

@dataclass
class Server:
    # One connected MCP server: display name, live client session, and its tools.
    name: str
    cs: ClientSession
    tools: list[Tooler]
135
+
136
def _get_chat_history_text(messagers: list[DialogueMessager]) -> str:
    """Render non-empty turns as newline-joined 'role: content' lines."""
    rendered = (f'{m.role}: {m.content}' for m in messagers if m.content)
    return '\n'.join(rendered)
138
+
139
async def _get_result(aclient: AsyncOpenAI, model: str, prompt: str) -> dict:
    """Run one non-streaming completion and parse the reply leniently as JSON."""
    response = await aclient.chat.completions.create(
        model=model,
        messages=[{'role': 'user', 'content': prompt}],
        stream=False)
    # json_repair tolerates the malformed JSON models sometimes emit;
    # imported lazily so the dependency is only needed on this path.
    import json_repair
    return json_repair.loads(response.choices[0].message.content)
145
+
146
# Prompt template for suggesting follow-up questions.
# Rendered with str.format; placeholders: {trace_num}, {chat_history}.
# Literal braces in the output example are doubled ({{ }}) to survive format().
_trace_tp = '''### Task:
Suggest {trace_num} relevant follow-up questions or prompts that the user might naturally ask next in this conversation as a **user**, based on the chat history, to help continue or deepen the discussion.
### Guidelines:
- Write all follow-up questions from the user’s point of view, directed to the assistant.
- Make questions concise, clear, and directly related to the discussed topic(s).
- Only suggest follow-ups that make sense given the chat content and do not repeat what was already covered.
- If the conversation is very short or not specific, suggest more general (but relevant) follow-ups the user might ask.
- Use the conversation's primary language; default to English if multilingual.
- Response must be a JSON array of strings, no extra text or formatting.
### Output:
JSON format: {{ "follow_ups": ["Question 1?", "Question 2?", "Question 3?"] }}
### Chat History:
<chat_history>
{chat_history}
</chat_history>'''
161
+
162
async def aget_trace(aclient: AsyncOpenAI, model: str, messagers: list[DialogueMessager], trace_num: int = 3) -> list[str]:
    """Generate follow-up questions from the dialogue history via the model."""
    prompt = _trace_tp.format(trace_num=trace_num, chat_history=_get_chat_history_text(messagers))
    parsed = await _get_result(aclient, model, prompt)
    return parsed.get('follow_ups', [])
166
+
167
# Prompt template for summarizing a chat into a short emoji title.
# Rendered with str.format; placeholder: {chat_history}.
# BUG FIX: the '### Examples:' lines previously used SINGLE braces
# ({ "title": ... }), which str.format treats as replacement fields, so
# aget_title crashed with KeyError/ValueError on every call. All literal
# braces must be doubled ({{ }}), matching the '### Output:' line.
_title_tp = '''### Task:
Generate a concise, 3-5 word title with an emoji summarizing the chat history.
### Guidelines:
- The title should clearly represent the main theme or subject of the conversation.
- Use emojis that enhance understanding of the topic, but avoid quotation marks or special formatting.
- Write the title in the chat's primary language; default to English if multilingual.
- Prioritize accuracy over excessive creativity; keep it clear and simple.
- Your entire response must consist solely of the JSON object, without any introductory or concluding text.
- The output must be a single, raw JSON object, without any markdown code fences or other encapsulating text.
- Ensure no conversational text, affirmations, or explanations precede or follow the raw JSON output, as this will cause direct parsing failure.
### Output:
JSON format: {{ "title": "your concise title here" }}
### Examples:
- {{ "title": "📉 Stock Market Trends" }},
- {{ "title": "🍪 Perfect Chocolate Chip Recipe" }},
- {{ "title": "Evolution of Music Streaming" }},
- {{ "title": "Remote Work Productivity Tips" }},
- {{ "title": "Artificial Intelligence in Healthcare" }},
- {{ "title": "🎮 Video Game Development Insights" }}
### Chat History:
<chat_history>
{chat_history}
</chat_history>'''
190
+
191
async def aget_title(aclient: AsyncOpenAI, model: str, messagers: list[DialogueMessager]) -> str:
    """Generate a short summary title for the dialogue history via the model."""
    prompt = _title_tp.format(chat_history=_get_chat_history_text(messagers))
    parsed = await _get_result(aclient, model, prompt)
    return parsed.get('title', '')
@@ -0,0 +1,19 @@
1
+ Metadata-Version: 2.4
2
+ Name: mlchat
3
+ Version: 1.0.8
4
+ Summary: openai+mcp调用工具
5
+ Home-page: https://www.python.org
6
+ Author: mengling
7
+ Author-email: 1321443305@qq.com
8
+ Requires-Python: >=3.8
9
+ Requires-Dist: loguru
10
+ Requires-Dist: openai
11
+ Requires-Dist: mcp>=1.25.0
12
+ Requires-Dist: pydantic
13
+ Requires-Dist: json-repair
14
+ Dynamic: author
15
+ Dynamic: author-email
16
+ Dynamic: home-page
17
+ Dynamic: requires-dist
18
+ Dynamic: requires-python
19
+ Dynamic: summary
@@ -0,0 +1,11 @@
1
+ MANIFEST.in
2
+ setup.py
3
+ mlchat/__init__.py
4
+ mlchat/extension.py
5
+ mlchat.egg-info/PKG-INFO
6
+ mlchat.egg-info/SOURCES.txt
7
+ mlchat.egg-info/dependency_links.txt
8
+ mlchat.egg-info/requires.txt
9
+ mlchat.egg-info/top_level.txt
10
+ mlchat/__pycache__/__init__.cpython-312.pyc
11
+ mlchat/__pycache__/extension.cpython-312.pyc
@@ -0,0 +1,5 @@
1
+ loguru
2
+ openai
3
+ mcp>=1.25.0
4
+ pydantic
5
+ json-repair
@@ -0,0 +1 @@
1
+ mlchat
mlchat-1.0.8/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
mlchat-1.0.8/setup.py ADDED
@@ -0,0 +1,24 @@
1
import setuptools
import json

# Reference: https://www.jb51.net/article/202841.htm
# To build, place this file together with MANIFEST.in next to the package directory.
# The package must contain an __init__.py for pip to import it after install.
# pip install --upgrade setuptools wheel -i https://pypi.douban.com/simple
# python setup.py sdist bdist_wheel
# pip install twine
# twine upload dist/*
'''
python setup.py sdist bdist_wheel
twine upload -u user -p password dist/*
'''

name = 'mlchat'

# NOTE(review): build metadata is read from ../config.json, OUTSIDE the sdist —
# building from the released archive will fail without that file; confirm intended.
with open('../config.json', encoding='utf-8') as file:
    definfo, opmap = json.loads(file.read())
# Merge shared defaults with this package's overrides (opmap[name] wins).
setuptools.setup(
    name=name,
    packages=setuptools.find_packages(),
    **{**definfo, **opmap[name]}
)