tamar-model-client 0.1.1__tar.gz → 0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/PKG-INFO +61 -90
- {tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/README.md +60 -89
- {tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/setup.py +2 -2
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/__init__.py +4 -4
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/async_client.py +1 -1
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/generated/model_service_pb2_grpc.py +1 -1
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/schemas/inputs.py +2 -2
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/sync_client.py +9 -9
- {tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/tamar_model_client.egg-info/PKG-INFO +61 -90
- tamar_model_client-0.1.2/tamar_model_client.egg-info/SOURCES.txt +22 -0
- tamar_model_client-0.1.2/tamar_model_client.egg-info/top_level.txt +1 -0
- tamar_model_client-0.1.1/tamar_model_client.egg-info/SOURCES.txt +0 -22
- tamar_model_client-0.1.1/tamar_model_client.egg-info/top_level.txt +0 -1
- {tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/setup.cfg +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/auth.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/enums/__init__.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/enums/channel.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/enums/invoke.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/enums/providers.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/exceptions.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/generated/__init__.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/generated/model_service_pb2.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/schemas/__init__.py +0 -0
- {tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/schemas/outputs.py +0 -0
- {tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/tamar_model_client.egg-info/dependency_links.txt +0 -0
- {tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/tamar_model_client.egg-info/requires.txt +0 -0
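
The substance of this release is a rename: the package `model_manager_client` becomes `tamar_model_client`, and the client classes `ModelManagerClient` / `AsyncModelManagerClient` become `TamarModelClient` / `AsyncTamarModelClient`. A minimal migration sketch for downstream code, assuming only the names changed (which is what the hunks below show); constructor values are placeholders taken from the README examples:

```python
# Before 0.1.2 (old names, per the removed lines in the diffs below):
# from model_manager_client import ModelManagerClient, AsyncModelManagerClient

# From 0.1.2 onward the same clients are imported under the new names:
from tamar_model_client import TamarModelClient, AsyncTamarModelClient

# Construction is unchanged apart from the class names (values are placeholders)
client = TamarModelClient(server_address="localhost:50051", jwt_token="your-jwt-token")
async_client = AsyncTamarModelClient(server_address="localhost:50051", jwt_secret_key="your-jwt-secret-key")
```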
{tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/PKG-INFO  (+61 -90)

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tamar-model-client
-Version: 0.1.1
+Version: 0.1.2
 Summary: A Python SDK for interacting with the Model Manager gRPC service
 Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
 Author: Oscar Ou
@@ -31,9 +31,9 @@ Dynamic: requires-dist
 Dynamic: requires-python
 Dynamic: summary

-# Model Manager Client
+# Tamar Model Client

-**Model Manager Client** is a high-performance Python SDK for connecting to the Model Manager gRPC service and providing unified access to multiple third-party AI
+**Tamar Model Client** is a high-performance Python SDK for connecting to the Model Manager gRPC service and providing unified access to multiple third-party AI
 model providers (such as OpenAI, Google, Azure OpenAI).

 ## ✨ Feature highlights
@@ -58,7 +58,7 @@ pip install tamar-model-client
 ## 🏗️ Project structure overview

 ```
-model_manager_client/
+tamar_model_client/
 ├── generated/                   # gRPC generated code
 │   ├── model_service.proto      # protocol definition file
 │   ├── model_service_pb2.py     # generated protobuf code
@@ -82,16 +82,16 @@ model_manager_client/
 ### Client initialization

 ```python
-from model_manager_client import ModelManagerClient, AsyncModelManagerClient
+from tamar_model_client import TamarModelClient, AsyncTamarModelClient

 # Synchronous client
-client = ModelManagerClient(
+client = TamarModelClient(
     server_address="localhost:50051",
     jwt_token="your-jwt-token"
 )

 # Asynchronous client
-async_client = AsyncModelManagerClient(
+async_client = AsyncTamarModelClient(
     server_address="localhost:50051",
     jwt_secret_key="your-jwt-secret-key"  # auto-generate a JWT from a fixed secret key
 )
@@ -105,12 +105,12 @@ async_client = AsyncModelManagerClient(
 #### OpenAI invocation example

 ```python
-from model_manager_client import ModelManagerClient
-from model_manager_client.schemas import ModelRequest, UserContext
-from model_manager_client.enums import ProviderType, InvokeType, Channel
+from tamar_model_client import TamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel

 # Create a synchronous client
-client = ModelManagerClient()
+client = TamarModelClient()

 # OpenAI invocation example
 request_data = ModelRequest(
@@ -144,12 +144,12 @@ else:
 #### Google invocation example (AI Studio / Vertex AI)

 ```python
-from model_manager_client import ModelManagerClient
-from model_manager_client.schemas import ModelRequest, UserContext
-from model_manager_client.enums import ProviderType, InvokeType, Channel
+from tamar_model_client import TamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel

 # Create a synchronous client
-client = ModelManagerClient()
+client = TamarModelClient()

 # Google AI Studio invocation example
 request_data = ModelRequest(
@@ -207,12 +207,12 @@ else:
 #### Azure OpenAI invocation example

 ```python
-from model_manager_client import ModelManagerClient
-from model_manager_client.schemas import ModelRequest, UserContext
-from model_manager_client.enums import ProviderType, InvokeType, Channel
+from tamar_model_client import TamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel

 # Create a synchronous client
-client = ModelManagerClient()
+client = TamarModelClient()

 # Azure OpenAI invocation example
 request_data = ModelRequest(
@@ -247,14 +247,14 @@ else:

 ```python
 import asyncio
-from model_manager_client import AsyncModelManagerClient
-from model_manager_client.schemas import ModelRequest, UserContext
-from model_manager_client.enums import ProviderType, InvokeType, Channel
+from tamar_model_client import AsyncTamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel


 async def main():
     # Create an asynchronous client
-    client = AsyncModelManagerClient()
+    client = AsyncTamarModelClient()

     # Assemble the request parameters
     request_data = ModelRequest(
@@ -293,14 +293,14 @@ asyncio.run(main())

 ```python
 import asyncio
-from model_manager_client import AsyncModelManagerClient
-from model_manager_client.schemas import ModelRequest, UserContext
-from model_manager_client.enums import ProviderType, InvokeType, Channel
+from tamar_model_client import AsyncTamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel


 async def stream_example():
     # Create an asynchronous client
-    client = AsyncModelManagerClient()
+    client = AsyncTamarModelClient()

     # Assemble the request parameters
     request_data = ModelRequest(
@@ -340,17 +340,17 @@ asyncio.run(stream_example())

 ```python
 import asyncio
-from model_manager_client import AsyncModelManagerClient
-from model_manager_client.schemas import (
+from tamar_model_client import AsyncTamarModelClient
+from tamar_model_client.schemas import (
     BatchModelRequest, BatchModelRequestItem,
     UserContext
 )
-from model_manager_client.enums import ProviderType, InvokeType, Channel
+from tamar_model_client.enums import ProviderType, InvokeType, Channel


 async def batch_example():
     # Create an asynchronous client
-    client = AsyncModelManagerClient()
+    client = AsyncTamarModelClient()

     # Assemble the batch request parameters
     batch_request = BatchModelRequest(
@@ -404,77 +404,48 @@ asyncio.run(batch_example())

 ### File input example

-Supports file inputs such as images (requires a multimodal model, such as …
+Supports file inputs such as images (requires a multimodal model, such as gemini-2.0-flash):

 ```python
-import asyncio
-from model_manager_client import AsyncModelManagerClient
-from model_manager_client.schemas import ModelRequest, UserContext
-from model_manager_client.enums import ProviderType, InvokeType, Channel
-…
-        provider=ProviderType.OPENAI,
-        channel=Channel.OPENAI,
-        invoke_type=InvokeType.CHAT_COMPLETIONS,
-        model="gpt-4-vision-preview",  # use a model that supports images
-        messages=[
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": "https://example.com/image.jpg"
-                        }
-                    },
-                    {
-                        "type": "text",
-                        "text": "请描述这张图片。"
-                    }
-                ]
-            }
-        ],
-        user_context=UserContext(
-            user_id="test_user",
-            org_id="test_org",
-            client_type="python-sdk"
+from tamar_model_client import TamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType
+from google.genai.types import Part
+model_request = ModelRequest(
+    provider=ProviderType.GOOGLE,  # choose Google as the provider
+    model="gemini-2.0-flash",
+    contents=[
+        "What is shown in this image?",
+        Part.from_uri(  # this argument form is supported by the Google SDK
+            file_uri="https://images.pexels.com/photos/248797/pexels-photo-248797.jpeg",
+            mime_type="image/jpeg",
         ),
-…
-# Run the file input example
-asyncio.run(file_input_example())
+    ],
+    user_context=UserContext(
+        org_id="testllm",
+        user_id="testllm",
+        client_type="conversation-service"
+    ),
+)
+client = TamarModelClient("localhost:50051")
+response = client.invoke(
+    model_request=model_request
+)
 ```

 ### ⚠️ Notes

-Below are important notes for using Model Manager Client:
+Below are important notes for using Tamar Model Client:

 - **Parameter handling**
-  - The common parameters are the **provider**, the **channel** …
+  - The common parameters are the **provider**, the **channel**, the **invoke type (invoke_type)**, and the **user context (user_context)**
   - **channel** and **invoke_type** are optional; **letting the system infer them automatically is recommended**, and they should only be set explicitly for special needs
   - Whether output is streamed is controlled by the common **stream** parameter; all other parameters follow the standard definitions of the corresponding provider's official SDK
 - **Client connection management**
   - gRPC uses long-lived HTTP/2 connections, so **using the client instance as a singleton is recommended**
-  - If you create multiple instances, **be sure to call** `client.close()` …
+  - If you create multiple instances, **be sure to call** `client.close()` **to close the connection manually**, to prevent connection buildup or resource leaks
 - **Error handling**:
-  - Every interface provides detailed **error information** and a **request ID (request_id…
+  - Every interface provides detailed **error information** and a **request ID (request_id)**; callers are advised to record them in their logs to simplify later troubleshooting

 ## ⚙️ Environment variable configuration (recommended)

@@ -528,9 +499,9 @@ MODEL_MANAGER_SERVER_GRPC_RETRY_DELAY=1.0
 Once the variables are loaded, no arguments are needed at initialization:

 ```python
-from model_manager_client import ModelManagerClient
+from tamar_model_client import TamarModelClient

-client = ModelManagerClient()  # uses the configuration from the environment variables
+client = TamarModelClient()  # uses the configuration from the environment variables
 ```

 ## Development
{tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/README.md  (+60 -89)

The README.md hunks are identical to the README portion of the PKG-INFO diff above (PKG-INFO embeds README.md as the package's long description).
{tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/setup.py  (+2 -2)

@@ -2,14 +2,14 @@ from setuptools import setup, find_packages

 setup(
     name="tamar-model-client",
-    version="0.1.1",
+    version="0.1.2",
     description="A Python SDK for interacting with the Model Manager gRPC service",
     author="Oscar Ou",
     author_email="oscar.ou@tamaredge.ai",
     packages=find_packages(),
     include_package_data=True,  # include non-.py files
     package_data={
-        "model_manager_client": ["generated/*.py"],  # ship the gRPC generated files
+        "tamar_model_client": ["generated/*.py"],  # ship the gRPC generated files
     },
     install_requires=[
         "grpcio",
{tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/__init__.py  (+4 -4)

@@ -1,10 +1,10 @@
-from .sync_client import ModelManagerClient
-from .async_client import AsyncModelManagerClient
+from .sync_client import TamarModelClient
+from .async_client import AsyncTamarModelClient
 from .exceptions import ModelManagerClientError, ConnectionError, ValidationError

 __all__ = [
-    "ModelManagerClient",
-    "AsyncModelManagerClient",
+    "TamarModelClient",
+    "AsyncTamarModelClient",
     "ModelManagerClientError",
     "ConnectionError",
     "ValidationError",
{tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/schemas/inputs.py  (+2 -2)

@@ -11,8 +11,8 @@ from openai.types.responses import ResponseInputParam, ResponseIncludable, Respo
 from pydantic import BaseModel, model_validator
 from typing import List, Optional, Union, Iterable, Dict, Literal

-from model_manager_client.enums import ProviderType, InvokeType
-from model_manager_client.enums.channel import Channel
+from tamar_model_client.enums import ProviderType, InvokeType
+from tamar_model_client.enums.channel import Channel


 class UserContext(BaseModel):
{tamar_model_client-0.1.1/model_manager_client → tamar_model_client-0.1.2/tamar_model_client}/sync_client.py  (+9 -9)

@@ -3,16 +3,16 @@ import atexit
 import logging
 from typing import Optional, Union, Iterator

-from .async_client import AsyncModelManagerClient
+from .async_client import AsyncTamarModelClient
 from .schemas import ModelRequest, BatchModelRequest, ModelResponse, BatchModelResponse

 logger = logging.getLogger(__name__)


-class ModelManagerClient:
+class TamarModelClient:
     """
     Synchronous version of the model client, for non-async environments (such as Flask, Django, or scripts).
-    Internally wraps AsyncModelManagerClient and handles event-loop compatibility.
+    Internally wraps AsyncTamarModelClient and handles event-loop compatibility.
     """
     _loop: Optional[asyncio.AbstractEventLoop] = None

@@ -27,16 +27,16 @@ class ModelManagerClient:
             retry_delay: float = 1.0,
     ):
         # Initialize the global event loop; it is created only once
-        if not ModelManagerClient._loop:
+        if not TamarModelClient._loop:
             try:
-                ModelManagerClient._loop = asyncio.get_running_loop()
+                TamarModelClient._loop = asyncio.get_running_loop()
             except RuntimeError:
-                ModelManagerClient._loop = asyncio.new_event_loop()
-                asyncio.set_event_loop(ModelManagerClient._loop)
+                TamarModelClient._loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(TamarModelClient._loop)

-        self._loop = ModelManagerClient._loop
+        self._loop = TamarModelClient._loop

-        self._async_client = AsyncModelManagerClient(
+        self._async_client = AsyncTamarModelClient(
             server_address=server_address,
             jwt_secret_key=jwt_secret_key,
             jwt_token=jwt_token,
{tamar_model_client-0.1.1 → tamar_model_client-0.1.2}/tamar_model_client.egg-info/PKG-INFO  (+61 -90)

Identical to the PKG-INFO diff above.
tamar_model_client-0.1.2/tamar_model_client.egg-info/SOURCES.txt  (new file, +22 -0)

@@ -0,0 +1,22 @@
+README.md
+setup.py
+tamar_model_client/__init__.py
+tamar_model_client/async_client.py
+tamar_model_client/auth.py
+tamar_model_client/exceptions.py
+tamar_model_client/sync_client.py
+tamar_model_client.egg-info/PKG-INFO
+tamar_model_client.egg-info/SOURCES.txt
+tamar_model_client.egg-info/dependency_links.txt
+tamar_model_client.egg-info/requires.txt
+tamar_model_client.egg-info/top_level.txt
+tamar_model_client/enums/__init__.py
+tamar_model_client/enums/channel.py
+tamar_model_client/enums/invoke.py
+tamar_model_client/enums/providers.py
+tamar_model_client/generated/__init__.py
+tamar_model_client/generated/model_service_pb2.py
+tamar_model_client/generated/model_service_pb2_grpc.py
+tamar_model_client/schemas/__init__.py
+tamar_model_client/schemas/inputs.py
+tamar_model_client/schemas/outputs.py
tamar_model_client-0.1.2/tamar_model_client.egg-info/top_level.txt  (new file, +1 -0)

@@ -0,0 +1 @@
+tamar_model_client
tamar_model_client-0.1.1/tamar_model_client.egg-info/SOURCES.txt  (deleted, +0 -22)

@@ -1,22 +0,0 @@
-README.md
-setup.py
-model_manager_client/__init__.py
-model_manager_client/async_client.py
-model_manager_client/auth.py
-model_manager_client/exceptions.py
-model_manager_client/sync_client.py
-model_manager_client/enums/__init__.py
-model_manager_client/enums/channel.py
-model_manager_client/enums/invoke.py
-model_manager_client/enums/providers.py
-model_manager_client/generated/__init__.py
-model_manager_client/generated/model_service_pb2.py
-model_manager_client/generated/model_service_pb2_grpc.py
-model_manager_client/schemas/__init__.py
-model_manager_client/schemas/inputs.py
-model_manager_client/schemas/outputs.py
-tamar_model_client.egg-info/PKG-INFO
-tamar_model_client.egg-info/SOURCES.txt
-tamar_model_client.egg-info/dependency_links.txt
-tamar_model_client.egg-info/requires.txt
-tamar_model_client.egg-info/top_level.txt
tamar_model_client-0.1.1/tamar_model_client.egg-info/top_level.txt  (deleted, +0 -1)

@@ -1 +0,0 @@
-model_manager_client
The remaining files listed above with +0 -0 (setup.cfg, auth.py, exceptions.py, the unchanged enums/, generated/, and schemas/ modules, dependency_links.txt, and requires.txt) were renamed from model_manager_client/ into tamar_model_client/, or kept in place, without content changes.