cnllm-0.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnllm-0.1.0/LICENSE +21 -0
- cnllm-0.1.0/PKG-INFO +257 -0
- cnllm-0.1.0/README.md +223 -0
- cnllm-0.1.0/cnllm/__init__.py +5 -0
- cnllm-0.1.0/cnllm/adapters/__init__.py +0 -0
- cnllm-0.1.0/cnllm/adapters/minimax/__init__.py +0 -0
- cnllm-0.1.0/cnllm/adapters/minimax/chat.py +116 -0
- cnllm-0.1.0/cnllm/client.py +73 -0
- cnllm-0.1.0/cnllm/core/__init__.py +0 -0
- cnllm-0.1.0/cnllm/core/base.py +88 -0
- cnllm-0.1.0/cnllm/core/config.py +14 -0
- cnllm-0.1.0/cnllm/core/exceptions.py +5 -0
- cnllm-0.1.0/cnllm/core/types.py +13 -0
- cnllm-0.1.0/cnllm/example.py +13 -0
- cnllm-0.1.0/cnllm/utils/__init__.py +0 -0
- cnllm-0.1.0/cnllm/utils/cleaner.py +12 -0
- cnllm-0.1.0/cnllm.egg-info/PKG-INFO +257 -0
- cnllm-0.1.0/cnllm.egg-info/SOURCES.txt +21 -0
- cnllm-0.1.0/cnllm.egg-info/dependency_links.txt +1 -0
- cnllm-0.1.0/cnllm.egg-info/requires.txt +9 -0
- cnllm-0.1.0/cnllm.egg-info/top_level.txt +1 -0
- cnllm-0.1.0/pyproject.toml +70 -0
- cnllm-0.1.0/setup.cfg +4 -0
cnllm-0.1.0/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 CNLLM Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
cnllm-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,257 @@
Metadata-Version: 2.4
Name: cnllm
Version: 0.1.0
Summary: 统一的中文大模型适配库,将中国大模型 API 输出转换为 OpenAI 格式,无缝接入openai、langchain等任何openai结构适配的python库
Author-email: kanchengw <wangkancheng1122@163.com>
License: MIT
Project-URL: Homepage, https://github.com/kanchengw/cnllm
Project-URL: Documentation, https://github.com/kanchengw/cnllm#readme
Project-URL: Repository, https://github.com/kanchengw/cnllm
Project-URL: Issues, https://github.com/kanchengw/cnllm/issues
Keywords: ai,llm,openai,langchain,minimax,chinese-llm,adapter
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: requests>=2.25.0
Requires-Dist: python-dotenv>=0.19.0
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: flake8>=6.0.0; extra == "dev"
Requires-Dist: mypy>=1.0.0; extra == "dev"
Dynamic: license-file

[The remaining 223 lines embed the package README verbatim as the long description; identical to cnllm-0.1.0/README.md below.]
cnllm-0.1.0/README.md
ADDED
@@ -0,0 +1,223 @@
# CNLLM - Chinese LLM Adapter

[English](README_en.md) | 中文

*(badge images: Python | License | Version | LangChain | OpenAI)*

---

A unified adapter library for Chinese LLMs: it converts the API output of domestic models (MiniMax, ByteDance Doubao, Kimi, and others) into the standard OpenAI format, so they plug into LangChain, AutoGen, and other mainstream AI frameworks with no extra integration work.

## Features

- **OpenAI compatible** - all output follows the standard OpenAI API format
- **Native LangChain support** - works directly with LangChain message types and helpers
- **Unified interface** - one code path, switch models seamlessly
- **Streaming output** - streaming responses (planned)
- **Retry mechanism** - built-in timeouts and automatic retries
- **Detailed logging** - clear error messages and debugging support

## Supported models

### Verified
- [x] MiniMax-M2.7
- [x] MiniMax-M2.5

### In development
- [ ] ByteDance Doubao
- [ ] Kimi (Moonshot)
- [ ] StepFun
- [ ] Baidu ERNIE
- [ ] Alibaba Qwen
- [ ] Zhipu GLM (ChatGLM)

## Installation

```bash
pip install cnllm
```

Or install from source:

```bash
git clone https://github.com/yourusername/cnllm.git
cd cnllm
pip install -e .
```

## Quick start

### Basic usage

```python
from cnllm import CNLLM, MINIMAX_API_KEY

# Initialize the client
client = CNLLM(
    model="minimax-m2.7",  # or "minimax-m2.5"
    api_key=MINIMAX_API_KEY
)

# Send a message
resp = client.chat.create(
    messages=[
        {"role": "user", "content": "用一句话介绍自己"}
    ]
)

# Read the reply
print(resp["choices"][0]["message"]["content"])
```

### Environment variables

Create a `.env` file:

```env
MINIMAX_API_KEY=your_api_key_here
```

### Using with LangChain

```python
from langchain_core.messages import HumanMessage, AIMessage
from cnllm import CNLLM, MINIMAX_API_KEY

client = CNLLM(model="minimax-m2.7", api_key=MINIMAX_API_KEY)

# CNLLM output can be consumed directly by LangChain
resp = client.chat.create(
    messages=[{"role": "user", "content": "你好"}]
)

# Convert to a LangChain message
ai_msg = AIMessage(content=resp["choices"][0]["message"]["content"])
print(ai_msg.content)
```

## API reference

### CNLLM client

```python
from cnllm import CNLLM

client = CNLLM(
    model="minimax-m2.7",    # model name
    api_key="your_api_key",  # API key
    timeout=30,              # request timeout (seconds)
    max_retries=3,           # maximum number of retries
    retry_delay=1.0          # delay between retries (seconds)
)
```

### chat.create()

```python
resp = client.chat.create(
    messages=[
        {"role": "system", "content": "你是一个有帮助的助手"},
        {"role": "user", "content": "你好"}
    ],
    temperature=0.7,        # sampling temperature
    stream=False,           # whether to stream the response
    model="minimax-m2.7"    # overrides the default model
)
```

### Response format (OpenAI standard)

```python
{
    "id": "chatcmpl-xxx",
    "object": "chat.completion",
    "created": 1234567890,
    "model": "minimax-m2.7",
    "choices": [
        {
            "index": 0,
            "message": {
                "role": "assistant",
                "content": "你好!有什么可以帮助你的吗?"
            },
            "finish_reason": "stop"
        }
    ],
    "usage": {
        "prompt_tokens": 20,
        "completion_tokens": 15,
        "total_tokens": 35
    }
}
```

## Project layout

```
cnllm/
├── adapters/            # adapter layer
│   └── minimax/         # MiniMax adapter
│       └── chat.py
├── core/                # core components
│   ├── base.py          # HTTP client
│   ├── config.py        # configuration
│   ├── exceptions.py    # exception definitions
│   └── types.py         # type definitions
├── utils/               # utilities
│   └── cleaner.py       # output cleaning
├── client.py            # unified client entry point
└── __init__.py
```

## Error handling

```python
from cnllm import CNLLM
from cnllm.core.exceptions import ModelAPIError, ParseError

try:
    client = CNLLM(model="minimax-m2.7", api_key="invalid_key")
    resp = client.chat.create(messages=[{"role": "user", "content": "你好"}])
except ModelAPIError as e:
    print(f"API 错误: {e}")
except ParseError as e:
    print(f"解析错误: {e}")
except ValueError as e:
    print(f"参数错误: {e}")
```

## Development

### Running tests

```bash
# Install the development dependencies
pip install -e .[dev]

# Run all tests
python test_CNLLM.py
```

### Adding a new model adapter

1. Create a new adapter directory under `adapters/`
2. Implement a `create_completion()` method
3. Implement a `_to_openai_format()` conversion method
4. Register the adapter in `client.py` (a minimal skeleton is sketched below)
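
A minimal skeleton of such an adapter is sketched here. Everything provider-specific (the `DoubaoAdapter` name, base URL, endpoint path, and response fields) is a placeholder rather than a real Doubao API; only `BaseHttpClient`, `ParseError`, and the OpenAI-shaped return value come from this package.

```python
# Hypothetical adapter skeleton (e.g. cnllm/adapters/doubao/chat.py).
# Provider-specific values below are placeholders -- consult the real API docs.
import time
import uuid
from typing import Any, Dict, List, Optional

from ...core.base import BaseHttpClient   # relative imports, as in adapters/minimax/chat.py
from ...core.exceptions import ParseError


class DoubaoAdapter:
    SUPPORTED_MODELS = ["doubao-pro"]      # placeholder model id
    DEFAULT_MODEL = "doubao-pro"

    def __init__(self, api_key: str, model: Optional[str] = None,
                 timeout: int = 30, max_retries: int = 3, retry_delay: float = 1.0):
        self.client = BaseHttpClient(
            api_key=api_key,
            base_url="https://doubao.example.com",   # placeholder URL
            timeout=timeout, max_retries=max_retries, retry_delay=retry_delay,
        )
        self.model = model or self.DEFAULT_MODEL

    def create_completion(self, messages: List[Dict[str, str]],
                          temperature: float = 0.1, stream: bool = False,
                          model: Optional[str] = None) -> Dict[str, Any]:
        use_model = model or self.model
        raw = self.client.post(
            "/chat/completions",                     # placeholder path
            {"model": use_model, "messages": messages,
             "temperature": temperature, "stream": stream},
        )
        return self._to_openai_format(raw, use_model)

    def _to_openai_format(self, raw: Dict[str, Any], model: str) -> Dict[str, Any]:
        try:
            # Adjust the field access to the provider's actual response schema.
            content = raw["choices"][0]["message"]["content"]
        except (KeyError, IndexError) as e:
            raise ParseError(f"Failed to parse response: missing field {e}")
        return {
            "id": f"chatcmpl-{uuid.uuid4().hex[:24]}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": [{"index": 0,
                         "message": {"role": "assistant", "content": content},
                         "finish_reason": "stop"}],
            "usage": raw.get("usage", {}),
        }
```

Registration (step 4) is a prefix check in `CNLLM._get_adapter`; a sketch of that appears next to `client.py` further down in this diff.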

## Contributing

Issues and pull requests are welcome!

## License

MIT License - see the [LICENSE](LICENSE) file

## Contact

- GitHub Issues: [https://github.com/yourusername/cnllm/issues](https://github.com/yourusername/cnllm/issues)
- Email: your.email@example.com
File without changes

File without changes

cnllm-0.1.0/cnllm/adapters/minimax/chat.py
ADDED

@@ -0,0 +1,116 @@
import re
import uuid
import time
from typing import Dict, Any, List, Optional
from ...core.base import BaseHttpClient
from ...core.exceptions import ParseError, ModelAPIError
from ...utils.cleaner import OutputCleaner


class MiniMaxAdapter:
    SUPPORTED_MODELS = ["minimax-m2.7", "minimax-m2.5"]
    DEFAULT_MODEL = "minimax-m2.7"

    def __init__(
        self,
        api_key: str,
        model: Optional[str] = None,
        timeout: int = 30,
        max_retries: int = 3,
        retry_delay: float = 1.0
    ):
        self.client = BaseHttpClient(
            api_key=api_key,
            base_url="https://api.minimaxi.com",
            timeout=timeout,
            max_retries=max_retries,
            retry_delay=retry_delay
        )
        self.cleaner = OutputCleaner()
        self.model = model or self.DEFAULT_MODEL

        if self.model not in self.SUPPORTED_MODELS:
            raise ValueError(
                f"不支持的模型: {self.model}\n"
                f"支持的模型: {', '.join(self.SUPPORTED_MODELS)}"
            )

    def create_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.1,
        stream: bool = False,
        model: Optional[str] = None
    ) -> Dict[str, Any]:
        use_model = model or self.model

        if use_model not in self.SUPPORTED_MODELS:
            raise ValueError(f"不支持的模型: {use_model}")

        payload = {
            "model": self._to_minimax_model_name(use_model),
            "messages": messages,
            "temperature": temperature,
            "stream": stream
        }

        try:
            raw_resp = self.client.post("/v1/text/chatcompletion_v2", payload)
        except RuntimeError as e:
            raise ModelAPIError(f"MiniMax API 请求失败: {e}")

        return self._to_openai_format(raw_resp, use_model)

    def _to_minimax_model_name(self, model: str) -> str:
        """Map the unified model name to the MiniMax API model name."""
        mapping = {
            "minimax-m2.7": "MiniMax-M2.7",
            "minimax-m2.5": "MiniMax-M2.5"
        }
        return mapping.get(model, model)

    def _to_openai_format(self, raw: Dict[str, Any], model: str) -> Dict[str, Any]:
        """Convert a native MiniMax M2.5/M2.7 response into the standard OpenAI structure."""
        base_resp = raw.get("base_resp", {})
        if base_resp.get("status_code") and base_resp["status_code"] != 0:
            raise ModelAPIError(
                f"MiniMax API 错误: {base_resp.get('status_msg', '未知错误')}\n"
                f"状态码: {base_resp.get('status_code')}"
            )

        try:
            raw_content = raw["choices"][0]["message"]["content"]
            finish_reason = raw["choices"][0].get("finish_reason", "stop")
        except (KeyError, IndexError) as e:
            raise ParseError(f"解析响应失败: 缺少必要字段 {e}\n原始响应: {raw}")

        cleaned = self.cleaner.clean(raw_content)

        usage = raw.get("usage", {})
        prompt_tokens = usage.get("prompt_tokens", 0)
        completion_tokens = usage.get("completion_tokens", 0)
        total_tokens = usage.get("total_tokens", 0)

        return {
            "id": f"chatcmpl-{uuid.uuid4().hex[:24]}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": cleaned
                    },
                    "finish_reason": finish_reason
                }
            ],
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens
            }
        }
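
A quick way to see the adapter's conversion in isolation is to feed `_to_openai_format` a hand-written payload. This is a sketch only: the dictionary below merely mimics the fields the adapter reads (`base_resp`, `choices`, `usage`) and is not a captured MiniMax response; the API key is a dummy because no request is sent, and the environment variable is set only so that importing the package (which reads it in config.py) does not fail.

```python
# Illustrative only: exercise the raw-to-OpenAI conversion without a network call.
import os
os.environ.setdefault("MINIMAX_API_KEY", "dummy-key")   # config.py reads this at import time

from cnllm.adapters.minimax.chat import MiniMaxAdapter

adapter = MiniMaxAdapter(api_key="dummy-key", model="minimax-m2.7")

raw = {
    "base_resp": {"status_code": 0, "status_msg": "success"},
    "choices": [
        {"message": {"role": "assistant", "content": "你好!很高兴见到你。"},
         "finish_reason": "stop"}
    ],
    "usage": {"prompt_tokens": 8, "completion_tokens": 9, "total_tokens": 17},
}

resp = adapter._to_openai_format(raw, "minimax-m2.7")
print(resp["object"])                                   # chat.completion
print(resp["choices"][0]["message"]["content"])         # assistant text after OutputCleaner
```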
cnllm-0.1.0/cnllm/client.py
ADDED

@@ -0,0 +1,73 @@
from typing import Optional, Dict, Any, List
from dataclasses import dataclass

from .adapters.minimax.chat import MiniMaxAdapter


class CNLLM:
    SUPPORTED_MODELS = ["minimax", "minimax-m2.5", "minimax-m2.7"]

    def __init__(
        self,
        model: str,
        api_key: str,
        timeout: int = 30,
        max_retries: int = 3,
        retry_delay: float = 1.0
    ):
        self.model = self._normalize_model(model)
        self.api_key = api_key
        self.timeout = timeout
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.adapter = self._get_adapter()
        self.chat = self.ChatNamespace(self)

    def _normalize_model(self, model: str) -> str:
        model = model.lower()
        if model == "minimax":
            model = "minimax-m2.7"
        return model

    def _get_adapter(self):
        if self.model.startswith("minimax"):
            return MiniMaxAdapter(
                api_key=self.api_key,
                model=self.model,
                timeout=self.timeout,
                max_retries=self.max_retries,
                retry_delay=self.retry_delay
            )
        raise ValueError(f"暂不支持模型: {self.model}\n支持的模型: {', '.join(self.SUPPORTED_MODELS)}")

    def create_chat_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.1,
        stream: bool = False,
        model: Optional[str] = None
    ) -> Dict[str, Any]:
        return self.adapter.create_completion(
            messages=messages,
            temperature=temperature,
            stream=stream,
            model=model
        )

    class ChatNamespace:
        def __init__(self, parent):
            self.parent = parent

        def create(
            self,
            messages: List[Dict[str, str]],
            temperature: float = 0.1,
            stream: bool = False,
            model: Optional[str] = None
        ):
            return self.parent.create_chat_completion(
                messages=messages,
                temperature=temperature,
                stream=stream,
                model=model
            )
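
Step 4 of the README's adapter checklist ("register the adapter in client.py") happens in `_get_adapter` above. A sketch of how a second adapter could be wired in; the `doubao` module path and `DoubaoAdapter` class are hypothetical and do not exist in this release:

```python
# Sketch only: dispatching on the model-name prefix inside CNLLM._get_adapter.
def _get_adapter(self):
    if self.model.startswith("minimax"):
        return MiniMaxAdapter(
            api_key=self.api_key, model=self.model, timeout=self.timeout,
            max_retries=self.max_retries, retry_delay=self.retry_delay,
        )
    if self.model.startswith("doubao"):                  # hypothetical branch
        from .adapters.doubao.chat import DoubaoAdapter  # hypothetical module
        return DoubaoAdapter(
            api_key=self.api_key, model=self.model, timeout=self.timeout,
            max_retries=self.max_retries, retry_delay=self.retry_delay,
        )
    raise ValueError(f"暂不支持模型: {self.model}\n支持的模型: {', '.join(self.SUPPORTED_MODELS)}")
```

The new model names would also need to be added to `CNLLM.SUPPORTED_MODELS` (and, if aliases are wanted, to `_normalize_model`).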
File without changes
cnllm-0.1.0/cnllm/core/base.py
ADDED

@@ -0,0 +1,88 @@
import time
import requests
from typing import Dict, Any
from .exceptions import ModelAPIError


class BaseHttpClient:
    def __init__(
        self,
        api_key: str,
        base_url: str,
        timeout: int = 30,
        max_retries: int = 3,
        retry_delay: float = 1.0
    ):
        # Strip whitespace (including internal line breaks) from a pasted API key.
        self.api_key = ''.join(api_key.strip().split())
        self.base_url = base_url.strip()
        self.timeout = timeout
        self.max_retries = max_retries
        self.retry_delay = retry_delay

    def post(self, path: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json; charset=utf-8"
        }

        last_error = None
        for attempt in range(self.max_retries):
            try:
                response = requests.post(
                    url=f"{self.base_url}{path}",
                    headers=headers,
                    json=payload,
                    timeout=self.timeout
                )

                # Rate limited: back off exponentially and retry while attempts remain.
                if response.status_code == 429:
                    last_error = "请求被限流 (429)"
                    if attempt < self.max_retries - 1:
                        time.sleep(self.retry_delay * (2 ** attempt))
                        continue
                    raise ModelAPIError(
                        f"请求被限流 (429)。已重试 {self.max_retries} 次。\n"
                        f"请稍后重试或联系 API 提供商。"
                    )

                response.raise_for_status()
                return response.json()

            except requests.exceptions.Timeout:
                last_error = f"请求超时 (timeout={self.timeout}s)"
                if attempt < self.max_retries - 1:
                    time.sleep(self.retry_delay)
                    continue

            except requests.exceptions.ConnectionError as e:
                last_error = f"连接失败: {str(e)}"
                if attempt < self.max_retries - 1:
                    time.sleep(self.retry_delay)
                    continue

            except requests.exceptions.HTTPError as e:
                status_code = e.response.status_code
                if status_code >= 500:
                    # Server errors: exponential back-off, give up after max_retries.
                    last_error = f"服务器错误 ({status_code})"
                    if attempt < self.max_retries - 1:
                        time.sleep(self.retry_delay * (2 ** attempt))
                        continue
                    raise ModelAPIError(
                        f"API 服务器错误 ({status_code})。已重试 {self.max_retries} 次。\n"
                        f"请稍后重试。"
                    )
                else:
                    raise ModelAPIError(
                        f"API 请求失败 (HTTP {status_code}): {str(e)}\n"
                        f"请检查 API Key 和请求参数。"
                    )

            except ModelAPIError:
                # Already a well-formed error (e.g. the 429 branch above); do not re-wrap it.
                raise

            except Exception as e:
                raise ModelAPIError(f"API 请求失败: {str(e)}")

        raise ModelAPIError(f"重试 {self.max_retries} 次后仍然失败。最后错误: {last_error}")
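
For 429 and 5xx responses the client above backs off as `retry_delay * 2 ** attempt`, while timeouts and connection errors retry after a flat `retry_delay`. A tiny sketch of the exponential schedule with the default settings:

```python
# Wait times between attempts for 429/5xx responses
# (BaseHttpClient defaults: retry_delay=1.0, max_retries=3).
retry_delay, max_retries = 1.0, 3
for attempt in range(max_retries - 1):      # no wait after the final attempt
    print(f"after attempt {attempt + 1}: wait {retry_delay * (2 ** attempt):.1f}s")
# after attempt 1: wait 1.0s
# after attempt 2: wait 2.0s
```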
cnllm-0.1.0/cnllm/core/config.py
ADDED

@@ -0,0 +1,14 @@
import os
from dotenv import load_dotenv

# Automatically load the .env file
load_dotenv()

def get_env(key: str, default: str = None) -> str:
    value = os.getenv(key, default)
    if not value:
        raise ValueError(f"环境变量 {key} 未设置,请检查 .env 文件")
    return value.strip()

# MiniMax
MINIMAX_API_KEY = get_env("MINIMAX_API_KEY")
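
Note that the module resolves `MINIMAX_API_KEY` at import time, so the variable must be available (from the environment or a `.env` file) before `cnllm` is imported. A small sketch of `get_env` behavior; the variable names and values here are illustrative:

```python
import os
os.environ["MINIMAX_API_KEY"] = " sk-demo-key "       # must exist before importing cnllm

from cnllm.core.config import get_env

print(get_env("MINIMAX_API_KEY"))                     # "sk-demo-key" (whitespace stripped)
print(get_env("SOME_OPTIONAL_FLAG", default="off"))   # missing but has a default -> "off"

try:
    get_env("MISSING_VAR")                            # missing with no default -> ValueError
except ValueError as e:
    print(e)
```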
File without changes
cnllm-0.1.0/cnllm.egg-info/PKG-INFO
ADDED

@@ -0,0 +1,257 @@
[Identical to cnllm-0.1.0/PKG-INFO above: the same metadata header followed by the embedded README.]
cnllm-0.1.0/cnllm.egg-info/SOURCES.txt
ADDED

@@ -0,0 +1,21 @@
LICENSE
README.md
pyproject.toml
cnllm/__init__.py
cnllm/client.py
cnllm/example.py
cnllm.egg-info/PKG-INFO
cnllm.egg-info/SOURCES.txt
cnllm.egg-info/dependency_links.txt
cnllm.egg-info/requires.txt
cnllm.egg-info/top_level.txt
cnllm/adapters/__init__.py
cnllm/adapters/minimax/__init__.py
cnllm/adapters/minimax/chat.py
cnllm/core/__init__.py
cnllm/core/base.py
cnllm/core/config.py
cnllm/core/exceptions.py
cnllm/core/types.py
cnllm/utils/__init__.py
cnllm/utils/cleaner.py
cnllm-0.1.0/cnllm.egg-info/dependency_links.txt
ADDED

@@ -0,0 +1 @@

cnllm-0.1.0/cnllm.egg-info/top_level.txt
ADDED

@@ -0,0 +1 @@
cnllm
cnllm-0.1.0/pyproject.toml
ADDED

@@ -0,0 +1,70 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "cnllm"
version = "0.1.0"
description = "统一的中文大模型适配库,将中国大模型 API 输出转换为 OpenAI 格式,无缝接入openai、langchain等任何openai结构适配的python库"
readme = "README.md"
license = {text = "MIT"}
authors = [
    {name = "kanchengw", email = "wangkancheng1122@163.com"}
]
keywords = ["ai", "llm", "openai", "langchain", "minimax", "chinese-llm", "adapter"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Software Development :: Libraries :: Python Modules",
]
requires-python = ">=3.8"
dependencies = [
    "requests>=2.25.0",
    "python-dotenv>=0.19.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "black>=23.0.0",
    "flake8>=6.0.0",
    "mypy>=1.0.0",
]

[project.urls]
Homepage = "https://github.com/kanchengw/cnllm"
Documentation = "https://github.com/kanchengw/cnllm#readme"
Repository = "https://github.com/kanchengw/cnllm"
Issues = "https://github.com/kanchengw/cnllm/issues"

[tool.setuptools.packages.find]
where = ["."]
include = ["cnllm*"]

[tool.black]
line-length = 100
target-version = ['py38', 'py39', 'py310', 'py311', 'py312']

[tool.isort]
profile = "black"
line_length = 100

[tool.mypy]
python_version = "3.8"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = false

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_functions = ["test_*"]
addopts = "-v --tb=short"
cnllm-0.1.0/setup.cfg
ADDED