vectorvein 0.3.16__tar.gz → 0.3.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vectorvein-0.3.18/PKG-INFO +716 -0
- vectorvein-0.3.18/README.md +686 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/pyproject.toml +16 -1
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/api/__init__.py +4 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/api/client.py +132 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/api/models.py +39 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/anthropic_client.py +4 -3
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/base_client.py +8 -4
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/openai_compatible_client.py +80 -116
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/utils.py +41 -30
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/settings/__init__.py +1 -1
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/types/defaults.py +9 -2
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/media_editing.py +20 -1
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/triggers.py +0 -1
- vectorvein-0.3.16/PKG-INFO +0 -174
- vectorvein-0.3.16/README.md +0 -144
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/__init__.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/api/exceptions.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/__init__.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/baichuan_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/deepseek_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/ernie_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/gemini_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/groq_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/local_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/minimax_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/mistral_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/moonshot_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/openai_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/py.typed +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/qwen_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/stepfun_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/xai_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/yi_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/chat_clients/zhipuai_client.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/py.typed +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/server/token_server.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/settings/py.typed +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/types/__init__.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/types/enums.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/types/exception.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/types/llm_parameters.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/types/py.typed +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/types/settings.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/utilities/media_processing.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/utilities/rate_limiter.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/utilities/retry.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/graph/edge.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/graph/node.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/graph/port.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/graph/workflow.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/__init__.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/audio_generation.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/control_flows.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/file_processing.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/image_generation.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/llms.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/media_processing.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/output.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/relational_db.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/text_processing.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/tools.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/vector_db.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/video_generation.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/nodes/web_crawlers.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/utils/analyse.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/utils/check.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/utils/json_to_code.py +0 -0
- {vectorvein-0.3.16 → vectorvein-0.3.18}/src/vectorvein/workflow/utils/layout.py +0 -0
@@ -0,0 +1,716 @@
Metadata-Version: 2.1
Name: vectorvein
Version: 0.3.18
Summary: VectorVein Python SDK
Author-Email: Anderson <andersonby@163.com>
License: MIT
Requires-Python: >=3.10
Requires-Dist: openai>=1.99.3
Requires-Dist: tiktoken>=0.7.0
Requires-Dist: httpx>=0.27.0
Requires-Dist: anthropic>=0.47.1
Requires-Dist: pydantic>=2.8.2
Requires-Dist: Pillow>=10.4.0
Requires-Dist: deepseek-tokenizer>=0.1.0
Requires-Dist: qwen-tokenizer>=0.2.0
Requires-Dist: pycryptodome>=3.21.0
Provides-Extra: server
Requires-Dist: fastapi; extra == "server"
Requires-Dist: uvicorn; extra == "server"
Provides-Extra: redis
Requires-Dist: redis; extra == "redis"
Provides-Extra: diskcache
Requires-Dist: diskcache; extra == "diskcache"
Provides-Extra: vertex
Requires-Dist: google-auth>=2.35.0; extra == "vertex"
Provides-Extra: bedrock
Requires-Dist: boto3>=1.28.57; extra == "bedrock"
Requires-Dist: botocore>=1.31.57; extra == "bedrock"
Description-Content-Type: text/markdown

# VectorVein Python SDK

[PyPI version](https://badge.fury.io/py/vectorvein)
[Python versions](https://pypi.org/project/vectorvein/)
[License: MIT](https://opensource.org/licenses/MIT)

The VectorVein Python SDK is a powerful Python library that provides full access to the VectorVein platform. It includes three core components:

1. **VectorVein API client** - call VectorVein workflows and VApps
2. **Multi-model chat clients** - a unified interface to many large language models (Claude, OpenAI, Tongyi Qianwen, Zhipu AI, and more)
3. **Workflow design framework** - build and design complex AI workflows

## 🚀 Quick Start

### Installation

```bash
pip install vectorvein
```

### Basic Usage

#### 1. VectorVein API Client

```python
from vectorvein.api import VectorVeinClient, WorkflowInputField

# Create a client instance
client = VectorVeinClient(api_key="YOUR_API_KEY")

# Prepare the workflow input fields
input_fields = [
    WorkflowInputField(
        node_id="8fc6eceb-8599-46a7-87fe-58bf7c0b633e",
        field_name="商品名称",
        value="测试商品"
    )
]

# Run the workflow asynchronously
rid = client.run_workflow(
    wid="abcde0985736457aa72cc667f17bfc89",
    input_fields=input_fields,
    wait_for_completion=False
)
print(f"Workflow run ID: {rid}")

# Run the workflow synchronously
result = client.run_workflow(
    wid="abcde0985736457aa72cc667f17bfc89",
    input_fields=input_fields,
    wait_for_completion=True
)
print(f"Workflow run result: {result}")
```

#### 2. Chat Clients

```python
from vectorvein.chat_clients import create_chat_client, BackendType
from vectorvein.settings import settings

# Load settings (API keys and other configuration)
settings.load({
    "rate_limit": {
        "enabled": True,
        "backend": "redis",  # or "diskcache"
        "redis": {
            "host": "127.0.0.1",
            "port": 6379,
            "db": 0,
        },
        "default_rpm": 60,
        "default_tpm": 1000000,
    },
    "endpoints": [
        {
            "id": "anthropic-default",
            "api_base": "https://api.anthropic.com",
            "api_key": "your_claude_api_key",
            "rpm": 60,
            "tpm": 1000000
        },
        {
            "id": "openai-default",
            "api_base": "https://api.openai.com/v1",
            "api_key": "your_openai_api_key",
            "rpm": 3500,
            "tpm": 90000
        }
    ],
    "anthropic": {
        "models": {
            "claude-3-7-sonnet-20250219": {
                "id": "claude-3-7-sonnet-20250219",
                "endpoints": ["anthropic-default"],
                "context_length": 200000,
                "max_output_tokens": 8192,
                "function_call_available": True,
                "native_multimodal": True
            }
        }
    },
    "openai": {
        "models": {
            "gpt-4o": {
                "id": "gpt-4o",
                "endpoints": ["openai-default"],
                "context_length": 128000,
                "max_output_tokens": 16384,
                "function_call_available": True,
                "response_format_available": True,
                "native_multimodal": True
            }
        }
    }
})

# Create a Claude client
client = create_chat_client(BackendType.Anthropic, model="claude-3-7-sonnet-20250219")

# Send a message
response = client.create_completion([
    {"role": "user", "content": "Hello, please give me an overview of the history of artificial intelligence."}
])
print(response.content)

# Create an OpenAI client
openai_client = create_chat_client(BackendType.OpenAI, model="gpt-4o")

# Stream the response
for chunk in openai_client.create_stream([
    {"role": "user", "content": "Write a poem about spring."}
]):
    print(chunk.content, end="", flush=True)
```

#### 3. Workflow Design

```python
from vectorvein.workflow.graph.workflow import Workflow
from vectorvein.workflow.nodes.llms import Claude
from vectorvein.workflow.nodes.text_processing import TemplateCompose
from vectorvein.workflow.nodes.output import Text

# Create a workflow
workflow = Workflow()

# Create nodes
template = TemplateCompose()
template.add_port(name="用户输入", port_type="textarea", show=True)
template.ports["template"].value = "请回答以下问题:{{用户输入}}"

claude = Claude()
claude.ports["llm_model"].value = "claude-3-7-sonnet-20250219"
claude.ports["temperature"].value = 0.7

output = Text()
output.ports["output_title"].value = "AI回答"

# Add the nodes to the workflow
workflow.add_nodes([template, claude, output])

# Connect the nodes
workflow.connect(template, "output", claude, "prompt")
workflow.connect(claude, "output", output, "text")

# Lay out and export
workflow.layout()
print(workflow.to_json())
```

## 📚 Features

### VectorVein API Client

- **Workflow management**: run workflows, check their status, and manage executions
- **Access key management**: create, fetch, list, update, and delete access keys
- **VApp integration**: generate VApp access links
- **Async support**: a full async API (see the sketch after this list)
- **Error handling**: detailed exception types and error messages
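
The synchronous client shown above is mirrored by an async variant for use inside asyncio applications. Below is a minimal sketch; the class name `AsyncVectorVeinClient` and its awaitable method signatures are assumptions here, not confirmed by this package's documentation, so check `vectorvein.api` for the actual export.

```python
import asyncio

# Assumption: an async client is exported alongside VectorVeinClient; the name
# AsyncVectorVeinClient and its signatures are assumed, not confirmed.
from vectorvein.api import AsyncVectorVeinClient, WorkflowInputField


async def main():
    client = AsyncVectorVeinClient(api_key="YOUR_API_KEY")

    input_fields = [
        WorkflowInputField(
            node_id="8fc6eceb-8599-46a7-87fe-58bf7c0b633e",
            field_name="商品名称",
            value="测试商品",
        )
    ]

    # Same arguments as the synchronous client, but awaited
    result = await client.run_workflow(
        wid="abcde0985736457aa72cc667f17bfc89",
        input_fields=input_fields,
        wait_for_completion=True,
    )
    print(result)


asyncio.run(main())
```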

### Chat Clients

#### Supported Model Providers

- **Anthropic**: Claude-3, Claude-3.5, Claude-4, Claude Opus, etc.
- **OpenAI**: GPT-3.5, GPT-4, GPT-4o, and the o1 and o3 series
- **Alibaba Cloud**: Tongyi Qianwen Qwen2.5, Qwen3, QVQ, etc.
- **Zhipu AI**: GLM-4, GLM-4.5, GLM-Z1, etc.
- **DeepSeek**: DeepSeek-Chat, DeepSeek-Reasoner
- **Moonshot AI**: Kimi and Moonshot series
- **Google**: Gemini 1.5, Gemini 2.0, Gemini 2.5
- **Baichuan**: Baichuan3, Baichuan4
- **01.AI**: Yi-Lightning, Yi-Vision
- **MiniMax**: MiniMax-Text, MiniMax-M1
- **Mistral**: Mistral Large, Codestral
- **Groq**: Llama3, Mixtral, etc.
- **XAI**: Grok-2, Grok-3, Grok-4
- **Baidu**: ERNIE series
- **StepFun**: Step-1 and Step-2 series
- **Local models**: locally deployed models

#### Core Features

- **Unified interface**: the same API for every model (see the sketch after this list)
- **Streaming**: real-time streamed output
- **Multimodal**: image and audio input for models that support it
- **Tool calls**: function calling for models that support it
- **Context management**: automatic handling of context-length limits
- **Token accounting**: accurate token counting and usage statistics
- **Rate limiting**: built-in rate limiting and retry handling
- **Response formats**: structured output such as JSON mode
243
|
+
|
244
|
+
### 工作流设计框架
|
245
|
+
|
246
|
+
- **可视化节点**: 丰富的预置节点库
|
247
|
+
- **灵活连接**: 节点间的数据流连接
|
248
|
+
- **批量处理**: 支持列表输入的批量处理
|
249
|
+
- **代码执行**: 内置Python代码执行节点
|
250
|
+
- **文件处理**: 文档读取、图像处理、音频处理
|
251
|
+
- **数据输出**: 表格、文档、图表等多种输出格式
|
252
|
+
|
253
|
+
## 🔧 安装和配置
|
254
|
+
|
255
|
+
### 依赖要求
|
256
|
+
|
257
|
+
- Python 3.10+
|
258
|
+
- 各模型API密钥(按需配置)
|
259
|
+
|
260
|
+
### 可选依赖
|
261
|
+
|
262
|
+
```bash
|
263
|
+
# 服务器功能
|
264
|
+
pip install vectorvein[server]
|
265
|
+
|
266
|
+
# Redis缓存
|
267
|
+
pip install vectorvein[redis]
|
268
|
+
|
269
|
+
# 磁盘缓存
|
270
|
+
pip install vectorvein[diskcache]
|
271
|
+
|
272
|
+
# Google Vertex AI
|
273
|
+
pip install vectorvein[vertex]
|
274
|
+
|
275
|
+
# AWS Bedrock
|
276
|
+
pip install vectorvein[bedrock]
|
277
|
+
```
|
278
|
+
|
279
|
+
### 设置配置
|
280
|
+
|
281
|
+
```python
|
282
|
+
from vectorvein.settings import settings
|
283
|
+
|
284
|
+
# 通过字典配置(v2 版本)
|
285
|
+
settings_dict = {
|
286
|
+
"rate_limit": {
|
287
|
+
"enabled": True,
|
288
|
+
"backend": "redis", # 或 "diskcache"
|
289
|
+
"redis": {
|
290
|
+
"host": "127.0.0.1",
|
291
|
+
"port": 6379,
|
292
|
+
"db": 0,
|
293
|
+
},
|
294
|
+
"diskcache": {
|
295
|
+
"cache_dir": ".rate_limit_cache",
|
296
|
+
},
|
297
|
+
"default_rpm": 60,
|
298
|
+
"default_tpm": 1000000,
|
299
|
+
},
|
300
|
+
"endpoints": [
|
301
|
+
{
|
302
|
+
"id": "anthropic-default",
|
303
|
+
"api_base": "https://api.anthropic.com",
|
304
|
+
"api_key": "sk-ant-...",
|
305
|
+
"rpm": 60,
|
306
|
+
"tpm": 1000000,
|
307
|
+
"concurrent_requests": 5
|
308
|
+
},
|
309
|
+
{
|
310
|
+
"id": "openai-default",
|
311
|
+
"api_base": "https://api.openai.com/v1",
|
312
|
+
"api_key": "sk-...",
|
313
|
+
"rpm": 3500,
|
314
|
+
"tpm": 90000,
|
315
|
+
"concurrent_requests": 10
|
316
|
+
},
|
317
|
+
{
|
318
|
+
"id": "qwen-default",
|
319
|
+
"api_base": "https://dashscope.aliyuncs.com/compatible-mode/v1",
|
320
|
+
"api_key": "sk-...",
|
321
|
+
"rpm": 100,
|
322
|
+
"tpm": 1000000
|
323
|
+
},
|
324
|
+
{
|
325
|
+
"id": "azure-openai",
|
326
|
+
"region": "East US",
|
327
|
+
"api_base": "https://your-resource.openai.azure.com",
|
328
|
+
"api_key": "your-azure-key",
|
329
|
+
"rpm": 900,
|
330
|
+
"tpm": 150000,
|
331
|
+
"is_azure": True
|
332
|
+
},
|
333
|
+
{
|
334
|
+
"id": "vertex-anthropic",
|
335
|
+
"region": "europe-west1",
|
336
|
+
"api_base": "https://europe-west1-aiplatform.googleapis.com",
|
337
|
+
"credentials": {
|
338
|
+
"token": "...",
|
339
|
+
"refresh_token": "...",
|
340
|
+
"client_id": "...",
|
341
|
+
"client_secret": "...",
|
342
|
+
"quota_project_id": "your-project-id"
|
343
|
+
},
|
344
|
+
"is_vertex": True
|
345
|
+
}
|
346
|
+
],
|
347
|
+
"anthropic": {
|
348
|
+
"models": {
|
349
|
+
"claude-3-7-sonnet-20250219": {
|
350
|
+
"id": "claude-3-7-sonnet-20250219",
|
351
|
+
"endpoints": ["anthropic-default"],
|
352
|
+
"context_length": 200000,
|
353
|
+
"max_output_tokens": 8192,
|
354
|
+
"function_call_available": True,
|
355
|
+
"native_multimodal": True
|
356
|
+
},
|
357
|
+
"claude-3-5-sonnet-20240620": {
|
358
|
+
"id": "claude-3-5-sonnet@20240620",
|
359
|
+
"endpoints": ["vertex-anthropic"],
|
360
|
+
"context_length": 200000,
|
361
|
+
"max_output_tokens": 8192,
|
362
|
+
"function_call_available": True,
|
363
|
+
"native_multimodal": True
|
364
|
+
}
|
365
|
+
}
|
366
|
+
},
|
367
|
+
"openai": {
|
368
|
+
"models": {
|
369
|
+
"gpt-4o": {
|
370
|
+
"id": "gpt-4o",
|
371
|
+
"endpoints": ["openai-default", "azure-openai"],
|
372
|
+
"context_length": 128000,
|
373
|
+
"max_output_tokens": 16384,
|
374
|
+
"function_call_available": True,
|
375
|
+
"response_format_available": True,
|
376
|
+
"native_multimodal": True
|
377
|
+
},
|
378
|
+
"gpt-4o-mini": {
|
379
|
+
"id": "gpt-4o-mini",
|
380
|
+
"endpoints": ["openai-default"],
|
381
|
+
"context_length": 128000,
|
382
|
+
"max_output_tokens": 16384,
|
383
|
+
"function_call_available": True,
|
384
|
+
"response_format_available": True,
|
385
|
+
"native_multimodal": True
|
386
|
+
}
|
387
|
+
}
|
388
|
+
},
|
389
|
+
"qwen": {
|
390
|
+
"models": {
|
391
|
+
"qwen3-32b": {
|
392
|
+
"id": "qwen3-32b",
|
393
|
+
"endpoints": ["qwen-default"],
|
394
|
+
"context_length": 32768,
|
395
|
+
"max_output_tokens": 8192,
|
396
|
+
"function_call_available": True,
|
397
|
+
"response_format_available": True,
|
398
|
+
"native_multimodal": False
|
399
|
+
},
|
400
|
+
"qwen2.5-72b-instruct": {
|
401
|
+
"id": "qwen2.5-72b-instruct",
|
402
|
+
"endpoints": ["qwen-default"],
|
403
|
+
"context_length": 131072,
|
404
|
+
"max_output_tokens": 8192,
|
405
|
+
"function_call_available": True,
|
406
|
+
"response_format_available": True,
|
407
|
+
"native_multimodal": False
|
408
|
+
}
|
409
|
+
}
|
410
|
+
}
|
411
|
+
}
|
412
|
+
settings.load(settings_dict)
|
413
|
+
|
414
|
+
# 或通过文件配置
|
415
|
+
settings.load_from_file("config.json")
|
416
|
+
```
|
417
|
+
|
418
|
+
## 📖 详细文档
|
419
|
+
|
420
|
+
### API 客户端详细使用
|
421
|
+
|
422
|
+
#### 访问密钥管理
|
423
|
+
|
424
|
+
```python
|
425
|
+
from vectorvein.api import VectorVeinClient
|
426
|
+
|
427
|
+
client = VectorVeinClient(api_key="YOUR_API_KEY")
|
428
|
+
|
429
|
+
# 创建访问密钥
|
430
|
+
keys = client.create_access_keys(
|
431
|
+
access_key_type="L", # L: 长期, M: 多次, O: 一次性
|
432
|
+
app_id="YOUR_APP_ID",
|
433
|
+
count=1,
|
434
|
+
max_credits=500,
|
435
|
+
description="测试密钥"
|
436
|
+
)
|
437
|
+
|
438
|
+
# 获取访问密钥信息
|
439
|
+
keys_info = client.get_access_keys(["ACCESS_KEY_1", "ACCESS_KEY_2"])
|
440
|
+
|
441
|
+
# 列出访问密钥
|
442
|
+
response = client.list_access_keys(
|
443
|
+
page=1,
|
444
|
+
page_size=10,
|
445
|
+
sort_field="create_time",
|
446
|
+
sort_order="descend"
|
447
|
+
)
|
448
|
+
|
449
|
+
# 更新访问密钥
|
450
|
+
client.update_access_keys(
|
451
|
+
access_key="ACCESS_KEY",
|
452
|
+
description="更新的描述"
|
453
|
+
)
|
454
|
+
|
455
|
+
# 删除访问密钥
|
456
|
+
client.delete_access_keys(
|
457
|
+
app_id="YOUR_APP_ID",
|
458
|
+
access_keys=["ACCESS_KEY_1", "ACCESS_KEY_2"]
|
459
|
+
)
|
460
|
+
```
|
461
|
+
|
462
|
+
#### 生成VApp访问链接
|
463
|
+
|
464
|
+
```python
|
465
|
+
url = client.generate_vapp_url(
|
466
|
+
app_id="YOUR_APP_ID",
|
467
|
+
access_key="YOUR_ACCESS_KEY",
|
468
|
+
key_id="YOUR_KEY_ID"
|
469
|
+
)
|
470
|
+
print(f"VApp访问链接: {url}")
|
471
|
+
```
|
472
|
+
|
473
|
+
### 聊天客户端高级用法
|
474
|
+
|
475
|
+
#### 工具调用(Function Calling)
|
476
|
+
|
477
|
+
```python
|
478
|
+
from vectorvein.chat_clients import create_chat_client, BackendType
|
479
|
+
|
480
|
+
client = create_chat_client(BackendType.OpenAI, model="gpt-4o")
|
481
|
+
|
482
|
+
tools = [{
|
483
|
+
"type": "function",
|
484
|
+
"function": {
|
485
|
+
"name": "get_weather",
|
486
|
+
"description": "获取指定城市的天气信息",
|
487
|
+
"parameters": {
|
488
|
+
"type": "object",
|
489
|
+
"properties": {
|
490
|
+
"city": {"type": "string", "description": "城市名称"}
|
491
|
+
},
|
492
|
+
"required": ["city"]
|
493
|
+
}
|
494
|
+
}
|
495
|
+
}]
|
496
|
+
|
497
|
+
response = client.create_completion(
|
498
|
+
messages=[{"role": "user", "content": "北京今天天气怎么样?"}],
|
499
|
+
tools=tools
|
500
|
+
)
|
501
|
+
|
502
|
+
if response.tool_calls:
|
503
|
+
for tool_call in response.tool_calls:
|
504
|
+
print(f"调用工具: {tool_call.function.name}")
|
505
|
+
print(f"参数: {tool_call.function.arguments}")
|
506
|
+
```
|
507
|
+
|
508
|
+
#### 多模态输入
|
509
|
+
|
510
|
+
```python
|
511
|
+
client = create_chat_client(BackendType.Anthropic, model="claude-3-7-sonnet-20250219")
|
512
|
+
|
513
|
+
messages = [{
|
514
|
+
"role": "user",
|
515
|
+
"content": [
|
516
|
+
{"type": "text", "text": "这张图片里有什么?"},
|
517
|
+
{
|
518
|
+
"type": "image",
|
519
|
+
"source": {
|
520
|
+
"type": "base64",
|
521
|
+
"media_type": "image/jpeg",
|
522
|
+
"data": "base64_encoded_image_data"
|
523
|
+
}
|
524
|
+
}
|
525
|
+
]
|
526
|
+
}]
|
527
|
+
|
528
|
+
response = client.create_completion(messages)
|
529
|
+
```
|
530
|
+
|
531
|
+
#### 结构化输出
|
532
|
+
|
533
|
+
```python
|
534
|
+
client = create_chat_client(BackendType.OpenAI, model="gpt-4o")
|
535
|
+
|
536
|
+
response = client.create_completion(
|
537
|
+
messages=[{"role": "user", "content": "分析以下数据并返回JSON格式"}],
|
538
|
+
response_format={"type": "json_object"}
|
539
|
+
)
|
540
|
+
```
|
541
|
+
|
542
|
+
### 工作流节点参考
|
543
|
+
|
544
|
+
#### LLM 节点
|
545
|
+
|
546
|
+
```python
|
547
|
+
from vectorvein.workflow.nodes.llms import Claude, OpenAI, AliyunQwen
|
548
|
+
|
549
|
+
# Claude 节点
|
550
|
+
claude = Claude()
|
551
|
+
claude.ports["llm_model"].value = "claude-3-7-sonnet-20250219"
|
552
|
+
claude.ports["temperature"].value = 0.7
|
553
|
+
claude.ports["prompt"].show = True
|
554
|
+
|
555
|
+
# OpenAI 节点
|
556
|
+
openai = OpenAI()
|
557
|
+
openai.ports["llm_model"].value = "gpt-4o"
|
558
|
+
openai.ports["response_format"].value = "json_object"
|
559
|
+
|
560
|
+
# 通义千问节点
|
561
|
+
qwen = AliyunQwen()
|
562
|
+
qwen.ports["llm_model"].value = "qwen3-32b"
|
563
|
+
```
|
564
|
+
|
565
|
+
#### 文本处理节点
|
566
|
+
|
567
|
+
```python
|
568
|
+
from vectorvein.workflow.nodes.text_processing import (
|
569
|
+
TemplateCompose, TextSplitters, TextReplace
|
570
|
+
)
|
571
|
+
|
572
|
+
# 文本合成
|
573
|
+
template = TemplateCompose()
|
574
|
+
template.add_port(name="标题", port_type="text", show=True)
|
575
|
+
template.add_port(name="内容", port_type="textarea", show=True)
|
576
|
+
template.ports["template"].value = "# {{标题}}\n\n{{内容}}"
|
577
|
+
|
578
|
+
# 文本分割
|
579
|
+
splitter = TextSplitters()
|
580
|
+
splitter.ports["split_method"].value = "delimiter"
|
581
|
+
splitter.ports["delimiter"].value = "\n"
|
582
|
+
splitter.ports["text"].show = True
|
583
|
+
|
584
|
+
# 文本替换
|
585
|
+
replacer = TextReplace()
|
586
|
+
replacer.ports["old_text"].value = "旧文本"
|
587
|
+
replacer.ports["new_text"].value = "新文本"
|
588
|
+
replacer.ports["text"].show = True
|
589
|
+
```
|
590
|
+
|
591
|
+
#### 文件处理节点
|
592
|
+
|
593
|
+
```python
|
594
|
+
from vectorvein.workflow.nodes.file_processing import FileLoader
|
595
|
+
|
596
|
+
loader = FileLoader()
|
597
|
+
loader.ports["parse_quality"].value = "high" # 高质量解析
|
598
|
+
loader.ports["files"].show = True # 显示文件上传界面
|
599
|
+
```
|
600
|
+
|
601
|
+
#### 输出节点
|
602
|
+
|
603
|
+
```python
|
604
|
+
from vectorvein.workflow.nodes.output import Text, Table, Document
|
605
|
+
|
606
|
+
# 文本输出
|
607
|
+
text_output = Text()
|
608
|
+
text_output.ports["output_title"].value = "结果"
|
609
|
+
|
610
|
+
# 表格输出
|
611
|
+
table_output = Table()
|
612
|
+
|
613
|
+
# 文档输出
|
614
|
+
doc_output = Document()
|
615
|
+
doc_output.ports["file_name"].value = "报告"
|
616
|
+
doc_output.ports["export_type"].value = ".xlsx"
|
617
|
+
```
|
618
|
+
|
619
|
+
## 🔍 异常处理
|
620
|
+
|
621
|
+
```python
|
622
|
+
from vectorvein.api import (
|
623
|
+
VectorVeinAPIError, APIKeyError, WorkflowError,
|
624
|
+
AccessKeyError, RequestError, TimeoutError
|
625
|
+
)
|
626
|
+
|
627
|
+
try:
|
628
|
+
result = client.run_workflow(wid="invalid", input_fields=[])
|
629
|
+
except APIKeyError as e:
|
630
|
+
print(f"API密钥错误: {e}")
|
631
|
+
except WorkflowError as e:
|
632
|
+
print(f"工作流错误: {e}")
|
633
|
+
except TimeoutError as e:
|
634
|
+
print(f"请求超时: {e}")
|
635
|
+
except VectorVeinAPIError as e:
|
636
|
+
print(f"API错误: {e.message}, 状态码: {e.status_code}")
|
637
|
+
```
|
638
|
+
|
639
|
+
## 🧪 测试
|
640
|
+
|
641
|
+
```bash
|
642
|
+
# 安装开发依赖
|
643
|
+
pip install -e .[dev]
|
644
|
+
|
645
|
+
# 运行测试
|
646
|
+
pytest tests/
|
647
|
+
|
648
|
+
# 运行特定测试
|
649
|
+
pytest tests/test_simple.py -v
|
650
|
+
|
651
|
+
# 生成覆盖率报告
|
652
|
+
pytest --cov=vectorvein tests/
|
653
|
+
```
|
654
|
+
|
655
|
+
## 📝 开发指南
|
656
|
+
|
657
|
+
### 项目结构
|
658
|
+
|
659
|
+
```
|
660
|
+
src/vectorvein/
|
661
|
+
├── api/ # VectorVein API客户端
|
662
|
+
│ ├── client.py # 主要客户端类
|
663
|
+
│ ├── models.py # 数据模型
|
664
|
+
│ └── exceptions.py # 异常定义
|
665
|
+
├── chat_clients/ # 聊天客户端
|
666
|
+
│ ├── __init__.py # 客户端工厂函数
|
667
|
+
│ ├── base_client.py # 基础客户端类
|
668
|
+
│ ├── anthropic_client.py # Claude客户端
|
669
|
+
│ ├── openai_client.py # OpenAI客户端
|
670
|
+
│ └── ... # 其他模型客户端
|
671
|
+
├── workflow/ # 工作流设计框架
|
672
|
+
│ ├── graph/ # 图结构定义
|
673
|
+
│ ├── nodes/ # 节点定义
|
674
|
+
│ └── utils/ # 工具函数
|
675
|
+
├── settings/ # 配置管理
|
676
|
+
├── types/ # 类型定义
|
677
|
+
└── utilities/ # 实用工具
|
678
|
+
```
|
679
|
+
|
680
|
+
### 贡献代码
|
681
|
+
|
682
|
+
1. Fork 项目
|
683
|
+
2. 创建功能分支 (`git checkout -b feature/amazing-feature`)
|
684
|
+
3. 提交更改 (`git commit -m 'Add amazing feature'`)
|
685
|
+
4. 推送分支 (`git push origin feature/amazing-feature`)
|
686
|
+
5. 创建 Pull Request
|
687
|
+
|
688
|
+
### 代码规范
|
689
|
+
|
690
|
+
- 使用 `ruff` 进行代码格式化和检查
|
691
|
+
- 遵循 Python 类型提示
|
692
|
+
- 编写测试用例覆盖新功能
|
693
|
+
- 更新相关文档
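
For reference, a typical way to run the formatter and linter over this repository's `src/` layout, assuming a default `ruff` configuration:

```bash
# Format the code, then lint and apply ruff's safe auto-fixes
ruff format src/
ruff check src/ --fix
```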

## 🤝 Community and Support

- **Documentation**: [Official docs](https://docs.vectorvein.com)
- **Bug reports**: [GitHub Issues](https://github.com/vectorvein/python-vectorvein/issues)
- **Discussions**: [GitHub Discussions](https://github.com/vectorvein/python-vectorvein/discussions)
- **Changelog**: [CHANGELOG.md](CHANGELOG.md)

## 📜 License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## 🙏 Acknowledgements

Thanks to all the developers and users who have contributed to the VectorVein Python SDK.

---

**Notes**:
1. Keep your API keys safe and do not share them with anyone
2. API calls are rate limited, so use them responsibly
3. In production, running workflows asynchronously is recommended
4. Feature support varies between models; consult each model's documentation