tamar-model-client 0.2.4__tar.gz → 0.2.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/PKG-INFO +108 -5
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/README.md +105 -2
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/setup.py +3 -3
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/request_builder.py +25 -22
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/enums/invoke.py +1 -0 (one-line addition, not expanded in this diff; see the sketch after this list)
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/schemas/__init__.py +2 -1
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/schemas/inputs.py +19 -5
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/utils.py +7 -1
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client.egg-info/PKG-INFO +108 -5
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client.egg-info/requires.txt +2 -2
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tests/test_google_azure_final.py +325 -57
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/setup.cfg +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/__init__.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/async_client.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/auth.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/circuit_breaker.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/__init__.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/base_client.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/http_fallback.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/logging_setup.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/request_id_manager.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/response_handler.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/utils.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/enums/__init__.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/enums/channel.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/enums/providers.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/error_handler.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/exceptions.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/generated/__init__.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/generated/model_service_pb2.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/generated/model_service_pb2_grpc.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/json_formatter.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/logging_icons.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/schemas/outputs.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/sync_client.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client.egg-info/SOURCES.txt +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client.egg-info/dependency_links.txt +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client.egg-info/top_level.txt +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tests/__init__.py +0 -0
- {tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tests/test_circuit_breaker.py +0 -0
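The `+1 -0` change to tamar_model_client/enums/invoke.py is not expanded anywhere in this diff, but the new `InvokeType.IMAGE_GENERATION_GENAI` member is used throughout the hunks below. A hypothetical sketch of what that one-line addition looks like; the member name is confirmed by its uses elsewhere in this diff, while the string value and the surrounding members are assumptions inferred from the README tree comments ("generation, images, image-generation-genai..."):

```python
# Hypothetical reconstruction of tamar_model_client/enums/invoke.py in 0.2.6.
# Only IMAGE_GENERATION_GENAI is confirmed by this diff; the other members and
# the exact string values are assumptions.
from enum import Enum

class InvokeType(str, Enum):
    GENERATION = "generation"                          # assumed existing member
    IMAGE_GENERATION = "image-generation"              # assumed existing member
    IMAGE_EDIT_GENERATION = "image-edit-generation"    # assumed existing member
    IMAGE_GENERATION_GENAI = "image-generation-genai"  # the 0.2.6 addition
```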
{tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tamar-model-client
-Version: 0.2.4
+Version: 0.2.6
 Summary: A Python SDK for interacting with the Model Manager gRPC service
 Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
 Author: Oscar Ou
@@ -16,8 +16,8 @@ Requires-Dist: grpcio-tools~=1.67.1
 Requires-Dist: pydantic
 Requires-Dist: PyJWT
 Requires-Dist: nest_asyncio
-Requires-Dist: openai
-Requires-Dist: google-genai
+Requires-Dist: openai>=1.99.3
+Requires-Dist: google-genai>=1.29.0
 Requires-Dist: requests>=2.25.0
 Requires-Dist: aiohttp>=3.7.0
 Dynamic: author
@@ -54,7 +54,7 @@ Dynamic: summary
 
 ### 🔌 Multi-provider support
 - **OpenAI** (GPT-3.5/4, DALL-E)
-- **Google** (Gemini - AI Studio & Vertex AI)
+- **Google** (Gemini - AI Studio & Vertex AI, Imagen image generation)
 - **Azure OpenAI** (enterprise deployments)
 - **Anthropic** (Claude)
 - **DeepSeek** (深度求索)
@@ -114,7 +114,7 @@ tamar_model_client/
 │   └── outputs.py        # Response models (ModelResponse, Usage)
 ├── 📁 enums/             # Enum definitions
 │   ├── providers.py      # AI providers (OpenAI, Google, Azure...)
-│   ├── invoke.py         # Invoke types (generation, images...)
+│   ├── invoke.py         # Invoke types (generation, images, image-generation-genai...)
 │   └── channel.py        # Service channels (openai, vertexai...)
 ├── 📁 core/              # Core modules
 │   ├── base_client.py    # Client base class (circuit breaking, fallback, config)
@@ -288,6 +288,37 @@ else:
     print(f"Response: {vertex_response.content}")
     if vertex_response.usage:
         print(f"Token usage: {vertex_response.usage}")
+
+# Google GenAI image generation example
+from google.genai import types
+
+genai_image_request = ModelRequest(
+    provider=ProviderType.GOOGLE,  # Use Google as the provider
+    channel=Channel.AI_STUDIO,  # Use the AI Studio channel
+    invoke_type=InvokeType.IMAGE_GENERATION_GENAI,  # Use the GenAI image generation invoke type
+    model="imagen-3.0-generate-001",  # Specify the image generation model
+    prompt="A cute kitten playing in a garden",  # Image description prompt
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    # Build the config with Google GenAI types
+    config=types.GenerateImagesConfig(
+        number_of_images=1,
+        aspect_ratio="1:1",
+        safety_filter_level="block_some"
+    )
+)
+
+# Send the image generation request and read the response
+image_response = client.invoke(genai_image_request)
+if image_response.error:
+    print(f"Error: {image_response.error}")
+else:
+    print(f"Image generated successfully: {image_response.content}")
+    if image_response.usage:
+        print(f"Usage: {image_response.usage}")
 ```
 
 ### Azure OpenAI example
@@ -488,6 +519,78 @@ async def batch_example():
 asyncio.run(batch_example())
 ```
 
+### Image generation examples
+
+OpenAI DALL-E, Google Vertex AI, and Google GenAI image generation are supported:
+
+```python
+from tamar_model_client import TamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel
+
+client = TamarModelClient()
+
+# OpenAI DALL-E image generation
+openai_image_request = ModelRequest(
+    provider=ProviderType.OPENAI,
+    channel=Channel.OPENAI,
+    invoke_type=InvokeType.IMAGE_GENERATION,
+    model="dall-e-3",
+    prompt="A cat in a business suit working in an office",
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    size="1024x1024",
+    quality="hd",
+    n=1
+)
+
+# Google Vertex AI image generation
+vertex_image_request = ModelRequest(
+    provider=ProviderType.GOOGLE,
+    channel=Channel.VERTEXAI,
+    invoke_type=InvokeType.IMAGE_GENERATION,
+    model="imagegeneration@006",
+    prompt="A beautiful mountain peak at sunrise",
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    number_of_images=1,
+    aspect_ratio="1:1",
+    safety_filter_level="block_some"
+)
+
+# Google GenAI image generation (new in this release)
+genai_image_request = ModelRequest(
+    provider=ProviderType.GOOGLE,
+    channel=Channel.AI_STUDIO,
+    invoke_type=InvokeType.IMAGE_GENERATION_GENAI,  # Newly added invoke type
+    model="imagen-3.0-generate-001",
+    prompt="A sci-fi city skyline at night, glowing with neon lights",
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    config=types.GenerateImagesConfig(
+        number_of_images=1,
+        aspect_ratio="16:9"
+    )
+)
+
+# Send the requests
+for request in [openai_image_request, vertex_image_request, genai_image_request]:
+    response = client.invoke(request)
+    if response.error:
+        print(f"Image generation failed: {response.error}")
+    else:
+        print(f"Image generated successfully: {response.content}")
+```
+
 ### File input example
 
 File inputs such as images are supported (requires a multimodal-capable model such as gemini-2.0-flash):
{tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/README.md
RENAMED
The README.md changes (@@ -22,7 +22,7 @@, @@ -82,7 +82,7 @@, @@ -256,6 +256,37 @@, @@ -456,6 +487,78 @@) are identical to the four content hunks of PKG-INFO above, offset by the 32-line metadata header that PKG-INFO prepends to the README, and are not repeated here.
{tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/setup.py
RENAMED
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="tamar-model-client",
-    version="0.2.4",
+    version="0.2.6",
     description="A Python SDK for interacting with the Model Manager gRPC service",
     author="Oscar Ou",
     author_email="oscar.ou@tamaredge.ai",
@@ -17,8 +17,8 @@ setup(
         "pydantic",
         "PyJWT",
         "nest_asyncio",
-        "openai",
-        "google-genai",
+        "openai>=1.99.3",
+        "google-genai>=1.29.0",
         "requests>=2.25.0",  # HTTP fallback (sync)
         "aiohttp>=3.7.0",  # HTTP fallback (async)
     ],
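In short, 0.2.6 raises dependency floors rather than adding new dependencies: the previously unpinned openai and google-genai requirements become openai>=1.99.3 and google-genai>=1.29.0, so the usual `pip install -U tamar-model-client` pulls both in.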
{tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/core/request_builder.py
RENAMED
@@ -17,6 +17,7 @@ from ..schemas.inputs import (
     UserContext,
     GoogleGenAiInput,
     GoogleVertexAIImagesInput,
+    GoogleGenAIImagesInput,
     OpenAIResponsesInput,
     OpenAIChatCompletionsInput,
     OpenAIImagesInput,
@@ -61,11 +62,13 @@ class RequestBuilder:
             return set(OpenAIImagesInput.model_fields.keys())
         case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
             return set(OpenAIImagesEditInput.model_fields.keys())
+        case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION_GENAI):
+            return set(GoogleGenAIImagesInput.model_fields.keys())
         case _:
             raise ValueError(
                 f"Unsupported provider/invoke_type combination: {provider} + {invoke_type}"
             )
-
+
     @staticmethod
     def build_grpc_extra_fields(model_request: ModelRequest) -> Dict[str, Any]:
         """
@@ -141,11 +144,11 @@ class RequestBuilder:
             client_type=model_request.user_context.client_type or "",
             extra=extra_fields
         )
-
+
     @staticmethod
     def build_batch_request_item(
-            batch_item,
-            user_context
+            batch_item: "BatchModelRequestItem",
+            user_context: "UserContext"
     ) -> model_service_pb2.ModelRequestItem:
         """
         Build a single item of a batch request
The two removed parameter lines lost their text in extraction; from the added lines they presumably carried the same names without the string annotations. All other hunks in this file (@@ -32,7 +33,7 @@, @@ -88,31 +91,31 @@, @@ -129,7 +132,7 @@, @@ -159,7 +162,7 @@, @@ -184,13 +187,13 @@, @@ -206,16 +209,16 @@) touch only whitespace: blank lines and closing parentheses that differ solely in trailing spaces.
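Taken together, the request_builder.py changes whitelist a (GOOGLE, IMAGE_GENERATION_GENAI) request to exactly the GoogleGenAIImagesInput fields. A minimal sketch of the new routing, assuming the import paths implied by the file list above (it exercises the real SDK only with 0.2.6 installed):

```python
# Minimal sketch; import paths are taken from the file list above.
from tamar_model_client.enums import ProviderType, InvokeType
from tamar_model_client.core.request_builder import RequestBuilder

allowed = RequestBuilder.get_allowed_fields(
    ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION_GENAI
)
# GoogleGenAIImagesInput declares exactly model, prompt, and config (see the
# schemas/inputs.py hunks below), so only those ModelRequest fields are copied
# into the gRPC extra fields for this provider/invoke_type combination.
print(allowed)  # expected: {'model', 'prompt', 'config'}
```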
{tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/schemas/__init__.py
RENAMED
@@ -2,11 +2,12 @@
 Schema definitions for the API
 """
 
-from .inputs import UserContext, ModelRequest, BatchModelRequestItem, BatchModelRequest
+from .inputs import UserContext, ModelRequest, BatchModelRequestItem, BatchModelRequest, TamarFileIdInput
 from .outputs import ModelResponse, BatchModelResponse
 
 __all__ = [
     # Model Inputs
+    "TamarFileIdInput",
     "UserContext",
     "ModelRequest",
     "BatchModelRequestItem",
{tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/schemas/inputs.py
RENAMED
@@ -26,6 +26,10 @@ class UserContext(BaseModel):
     client_type: str  # Client type; records which service the request came from
 
 
+class TamarFileIdInput(BaseModel):
+    file_id: str
+
+
 class GoogleGenAiInput(BaseModel):
     model: str
     contents: Union[types.ContentListUnion, types.ContentListUnionDict]
@@ -59,6 +63,16 @@ class GoogleVertexAIImagesInput(BaseModel):
     }
 
 
+class GoogleGenAIImagesInput(BaseModel):
+    model: str
+    prompt: str
+    config: Optional[types.GenerateImagesConfigOrDict] = None
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
 class OpenAIResponsesInput(BaseModel):
     background: Optional[bool] | NotGiven = NOT_GIVEN
     include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN
@@ -169,11 +183,11 @@ class OpenAIImagesInput(BaseModel):
 
 
 class OpenAIImagesEditInput(BaseModel):
-    image: Union[FileTypes, List[FileTypes]]
+    image: Union[FileTypes, List[FileTypes], TamarFileIdInput, List[TamarFileIdInput]]
     prompt: str
     background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN
     input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN
-    mask: FileTypes | NotGiven = NOT_GIVEN
+    mask: FileTypes | TamarFileIdInput | NotGiven = NOT_GIVEN
     model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN
     n: Optional[int] | NotGiven = NOT_GIVEN
     output_compression: Optional[int] | NotGiven = NOT_GIVEN
@@ -264,10 +278,10 @@ class ModelRequestInput(BaseRequest):
 
     # Google GenAI Input
     contents: Optional[Union[types.ContentListUnion, types.ContentListUnionDict]] = None
-    config: Optional[types.GenerateContentConfigOrDict] = None
+    config: Optional[Union[types.GenerateContentConfigOrDict, types.GenerateImagesConfigOrDict]] = None
 
     # Images (OpenAI Images / Images Edit / Google Vertex Images merged)
-    image: Optional[Union[FileTypes, List[FileTypes]]] = None
+    image: Optional[Union[FileTypes, List[FileTypes], TamarFileIdInput, List[TamarFileIdInput]]] = None
     # Merged same-name "background" field: Responses' bool (background task) + Images' transparency enum
     background: Optional[Union[bool, Literal["transparent", "opaque", "auto"], NotGiven]] = NOT_GIVEN
     moderation: Optional[Union[Literal["low", "auto"], NotGiven]] = NOT_GIVEN
@@ -275,7 +289,7 @@ class ModelRequestInput(BaseRequest):
     output_compression: Optional[Union[int, NotGiven]] = NOT_GIVEN
     output_format: Optional[Union[Literal["png", "jpeg", "webp"], NotGiven]] = NOT_GIVEN
     partial_images: Optional[Union[int, NotGiven]] = NOT_GIVEN
-    mask: Union[FileTypes, NotGiven] = NOT_GIVEN
+    mask: Union[FileTypes, TamarFileIdInput, NotGiven] = NOT_GIVEN
     negative_prompt: Optional[str] = None
     aspect_ratio: Optional[Literal["1:1", "9:16", "16:9", "4:3", "3:4"]] = None
     guidance_scale: Optional[float] = None
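The practical effect of the TamarFileIdInput additions is that image-edit requests can now reference server-side files by ID instead of passing raw bytes. A hypothetical sketch (the file_id values are invented for illustration):

```python
# Sketch of the new file-by-ID inputs; the file_id values are made up.
from tamar_model_client.schemas import TamarFileIdInput

image_ref = TamarFileIdInput(file_id="file-123")  # hypothetical ID
mask_ref = TamarFileIdInput(file_id="file-456")   # hypothetical ID
# Both now type-check wherever OpenAIImagesEditInput previously accepted only FileTypes:
#   image: Union[FileTypes, List[FileTypes], TamarFileIdInput, List[TamarFileIdInput]]
#   mask:  FileTypes | TamarFileIdInput | NotGiven
```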
{tamar_model_client-0.2.4 → tamar_model_client-0.2.6}/tamar_model_client/utils.py
RENAMED
@@ -3,6 +3,7 @@ from pydantic import BaseModel
 from typing import Any
 import os, mimetypes
 
+
 def convert_file_field(value: Any) -> Any:
     def is_file_like(obj):
         return hasattr(obj, "read") and callable(obj.read)
@@ -55,7 +56,7 @@ def validate_fields_by_provider_and_invoke_type(
     """
     from tamar_model_client.enums import ProviderType, InvokeType
     from tamar_model_client.schemas.inputs import GoogleGenAiInput, OpenAIResponsesInput, OpenAIChatCompletionsInput, \
-        OpenAIImagesInput, OpenAIImagesEditInput, GoogleVertexAIImagesInput
+        OpenAIImagesInput, OpenAIImagesEditInput, GoogleVertexAIImagesInput, GoogleGenAIImagesInput
 
     google_allowed = extra_allowed_fields | set(GoogleGenAiInput.model_fields)
     openai_responses_allowed = extra_allowed_fields | set(OpenAIResponsesInput.model_fields)
@@ -63,9 +64,11 @@ def validate_fields_by_provider_and_invoke_type(
     openai_images_allowed = extra_allowed_fields | set(OpenAIImagesInput.model_fields)
     openai_images_edit_allowed = extra_allowed_fields | set(OpenAIImagesEditInput.model_fields)
     google_vertexai_images_allowed = extra_allowed_fields | set(GoogleVertexAIImagesInput.model_fields)
+    google_genai_images_allowed = extra_allowed_fields | set(GoogleGenAIImagesInput.model_fields)
 
     google_required = {"model", "contents"}
     google_vertex_required = {"model", "prompt"}
+    google_genai_images_required = {"model", "prompt"}
     openai_resp_required = {"input", "model"}
     openai_chat_required = {"messages", "model"}
     openai_img_required = {"prompt"}
@@ -90,6 +93,9 @@ def validate_fields_by_provider_and_invoke_type(
         case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
             allowed = openai_images_edit_allowed
             required = openai_edit_required
+        case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION_GENAI):
+            allowed = google_genai_images_allowed
+            required = google_genai_images_required
         case _:
             raise ValueError(f"Unsupported provider/invoke_type: {instance.provider} + {instance.invoke_type}")
 
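With the validator updated, a Google GenAI image request passes field validation when it carries the required model and prompt and stays within the GoogleGenAIImagesInput field set. A sketch of the rule the new case enforces, with the field sets copied from the hunks above rather than computed by importing the package (extra_allowed_fields is ignored for brevity):

```python
# Field-set rule added for (GOOGLE, IMAGE_GENERATION_GENAI); sets copied from
# the utils.py hunks above, ignoring extra_allowed_fields for brevity.
required = {"model", "prompt"}           # google_genai_images_required
allowed = {"model", "prompt", "config"}  # GoogleGenAIImagesInput.model_fields

def fields_ok(fields: set[str]) -> bool:
    """True iff all required fields are present and none falls outside the allowed set."""
    return required <= fields <= allowed

assert fields_ok({"model", "prompt"})
assert fields_ok({"model", "prompt", "config"})
assert not fields_ok({"model"})                    # prompt missing
assert not fields_ok({"model", "prompt", "size"})  # "size" is an OpenAI Images field
```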