tamar-model-client 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,6 +17,7 @@ from ..schemas.inputs import (
     UserContext,
     GoogleGenAiInput,
     GoogleVertexAIImagesInput,
+    GoogleGenAIImagesInput,
     OpenAIResponsesInput,
     OpenAIChatCompletionsInput,
     OpenAIImagesInput,
@@ -32,7 +33,7 @@ class RequestBuilder:
     负责将高级的 ModelRequest 对象转换为 gRPC 协议所需的请求对象,
     包括参数验证、序列化和提供商特定的字段处理。
     """
-
+
     @staticmethod
     def get_allowed_fields(provider: ProviderType, invoke_type: InvokeType) -> Set[str]:
         """
@@ -61,11 +62,13 @@ class RequestBuilder:
                 return set(OpenAIImagesInput.model_fields.keys())
             case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
                 return set(OpenAIImagesEditInput.model_fields.keys())
+            case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION_GENAI):
+                return set(GoogleGenAIImagesInput.model_fields.keys())
             case _:
                 raise ValueError(
                     f"Unsupported provider/invoke_type combination: {provider} + {invoke_type}"
                 )
-
+
     @staticmethod
     def build_grpc_extra_fields(model_request: ModelRequest) -> Dict[str, Any]:
         """
@@ -88,31 +91,31 @@ class RequestBuilder:
                 model_request.provider,
                 model_request.invoke_type
             )
-
+
             # 将 ModelRequest 转换为字典,只包含已设置的字段
             model_request_dict = model_request.model_dump(exclude_unset=True)
-
+
             # 构建 gRPC 请求参数
             grpc_request_kwargs = {}
             for field in allowed_fields:
                 if field in model_request_dict:
                     value = model_request_dict[field]
-
+
                     # 跳过无效的值
                     if not is_effective_value(value):
                         continue
-
+
                     # 序列化不支持的类型
                     grpc_request_kwargs[field] = serialize_value(value)
-
+
             # 清理序列化后的参数中的 None 值
             grpc_request_kwargs = remove_none_from_dict(grpc_request_kwargs)
-
+
             return grpc_request_kwargs
-
+
         except Exception as e:
             raise ValueError(f"构建请求失败: {str(e)}") from e
-
+
     @staticmethod
     def build_single_request(model_request: ModelRequest) -> model_service_pb2.ModelRequestItem:
         """
@@ -129,7 +132,7 @@ class RequestBuilder:
         """
         # 构建额外字段
         extra_fields = RequestBuilder.build_grpc_extra_fields(model_request)
-
+
         # 创建 gRPC 请求对象
         return model_service_pb2.ModelRequestItem(
             provider=model_request.provider.value,
@@ -141,11 +144,11 @@ class RequestBuilder:
             client_type=model_request.user_context.client_type or "",
             extra=extra_fields
         )
-
+
     @staticmethod
     def build_batch_request_item(
-            batch_item: "BatchModelRequestItem",
-            user_context: "UserContext"
+            batch_item: "BatchModelRequestItem",
+            user_context: "UserContext"
     ) -> model_service_pb2.ModelRequestItem:
         """
         构建批量请求中的单个项目
@@ -159,7 +162,7 @@ class RequestBuilder:
         """
         # 构建额外字段
         extra_fields = RequestBuilder.build_grpc_extra_fields(batch_item)
-
+
         # 添加 custom_id 如果存在
         if hasattr(batch_item, 'custom_id') and batch_item.custom_id:
             request_item = model_service_pb2.ModelRequestItem(
@@ -184,13 +187,13 @@ class RequestBuilder:
                 client_type=user_context.client_type or "",
                 extra=extra_fields
             )
-
+
         # 添加 priority 如果存在
         if hasattr(batch_item, 'priority') and batch_item.priority is not None:
             request_item.priority = batch_item.priority
-
+
         return request_item
-
+
     @staticmethod
     def build_batch_request(batch_request: BatchModelRequest) -> model_service_pb2.ModelRequest:
         """
@@ -206,16 +209,16 @@ class RequestBuilder:
             ValueError: 当构建请求失败时
         """
         items = []
-
+
         for batch_item in batch_request.items:
             # 为每个请求项构建 gRPC 对象,传入 user_context
             request_item = RequestBuilder.build_batch_request_item(
-                batch_item,
+                batch_item,
                 batch_request.user_context
             )
             items.append(request_item)
-
+
         # 创建批量请求对象
         return model_service_pb2.ModelRequest(
            items=items
-        )
+        )
@@ -9,3 +9,4 @@ class InvokeType(str, Enum):
     GENERATION = "generation"  # 生成类,默认的值
     IMAGE_GENERATION = "image-generation"
     IMAGE_EDIT_GENERATION = "image-edit-generation"
+    IMAGE_GENERATION_GENAI = "image-generation-genai"  # GenAI SDK图像生成
@@ -59,6 +59,16 @@ class GoogleVertexAIImagesInput(BaseModel):
     }


+class GoogleGenAIImagesInput(BaseModel):
+    model: str
+    prompt: str
+    config: Optional[types.GenerateImagesConfigOrDict] = None
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
 class OpenAIResponsesInput(BaseModel):
     background: Optional[bool] | NotGiven = NOT_GIVEN
     include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN
@@ -264,7 +274,7 @@ class ModelRequestInput(BaseRequest):

     # Google GenAI Input
     contents: Optional[Union[types.ContentListUnion, types.ContentListUnionDict]] = None
-    config: Optional[types.GenerateContentConfigOrDict] = None
+    config: Optional[Union[types.GenerateContentConfigOrDict, types.GenerateImagesConfigOrDict]] = None

     # Images(OpenAI Images / Images Edit / Google Vertex Images 合并)
     image: Optional[Union[FileTypes, List[FileTypes]]] = None
@@ -3,6 +3,7 @@ from pydantic import BaseModel
 from typing import Any
 import os, mimetypes

+
 def convert_file_field(value: Any) -> Any:
     def is_file_like(obj):
         return hasattr(obj, "read") and callable(obj.read)
@@ -55,7 +56,7 @@ def validate_fields_by_provider_and_invoke_type(
     """
     from tamar_model_client.enums import ProviderType, InvokeType
     from tamar_model_client.schemas.inputs import GoogleGenAiInput, OpenAIResponsesInput, OpenAIChatCompletionsInput, \
-        OpenAIImagesInput, OpenAIImagesEditInput, GoogleVertexAIImagesInput
+        OpenAIImagesInput, OpenAIImagesEditInput, GoogleVertexAIImagesInput, GoogleGenAIImagesInput

     google_allowed = extra_allowed_fields | set(GoogleGenAiInput.model_fields)
     openai_responses_allowed = extra_allowed_fields | set(OpenAIResponsesInput.model_fields)
@@ -63,9 +64,11 @@ def validate_fields_by_provider_and_invoke_type(
     openai_images_allowed = extra_allowed_fields | set(OpenAIImagesInput.model_fields)
     openai_images_edit_allowed = extra_allowed_fields | set(OpenAIImagesEditInput.model_fields)
     google_vertexai_images_allowed = extra_allowed_fields | set(GoogleVertexAIImagesInput.model_fields)
+    google_genai_images_allowed = extra_allowed_fields | set(GoogleGenAIImagesInput.model_fields)

     google_required = {"model", "contents"}
     google_vertex_required = {"model", "prompt"}
+    google_genai_images_required = {"model", "prompt"}
     openai_resp_required = {"input", "model"}
     openai_chat_required = {"messages", "model"}
     openai_img_required = {"prompt"}
@@ -90,6 +93,9 @@ def validate_fields_by_provider_and_invoke_type(
         case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
             allowed = openai_images_edit_allowed
             required = openai_edit_required
+        case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION_GENAI):
+            allowed = google_genai_images_allowed
+            required = google_genai_images_required
         case _:
             raise ValueError(f"Unsupported provider/invoke_type: {instance.provider} + {instance.invoke_type}")

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tamar-model-client
-Version: 0.2.4
+Version: 0.2.5
 Summary: A Python SDK for interacting with the Model Manager gRPC service
 Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
 Author: Oscar Ou
@@ -54,7 +54,7 @@ Dynamic: summary

 ### 🔌 多服务商支持
 - **OpenAI** (GPT-3.5/4, DALL-E)
-- **Google** (Gemini - AI Studio & Vertex AI)
+- **Google** (Gemini - AI Studio & Vertex AI, Imagen 图像生成)
 - **Azure OpenAI** (企业级部署)
 - **Anthropic** (Claude)
 - **DeepSeek** (深度求索)
@@ -114,7 +114,7 @@ tamar_model_client/
 │   └── outputs.py        # 响应模型(ModelResponse, Usage)
 ├── 📁 enums/              # 枚举定义
 │   ├── providers.py       # AI 服务商(OpenAI, Google, Azure...)
-│   ├── invoke.py          # 调用类型(generation, images...)
+│   ├── invoke.py          # 调用类型(generation, images, image-generation-genai...)
 │   └── channel.py         # 服务通道(openai, vertexai...)
 ├── 📁 core/               # 核心功能模块
 │   ├── base_client.py     # 客户端基类(熔断、降级、配置)
@@ -288,6 +288,37 @@ else:
     print(f"响应: {vertex_response.content}")
     if vertex_response.usage:
         print(f"Token 使用情况: {vertex_response.usage}")
+
+# Google GenAI 图像生成示例
+from google.genai import types
+
+genai_image_request = ModelRequest(
+    provider=ProviderType.GOOGLE,  # 选择 Google 作为提供商
+    channel=Channel.AI_STUDIO,  # 使用 AI Studio 渠道
+    invoke_type=InvokeType.IMAGE_GENERATION_GENAI,  # 使用 GenAI 图像生成调用类型
+    model="imagen-3.0-generate-001",  # 指定图像生成模型
+    prompt="一只可爱的小猫在花园里玩耍",  # 图像描述提示词
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    # 使用 Google GenAI 类型构建配置
+    config=types.GenerateImagesConfig(
+        number_of_images=1,
+        aspect_ratio="1:1",
+        safety_filter_level="block_some"
+    )
+)
+
+# 发送图像生成请求并获取响应
+image_response = client.invoke(genai_image_request)
+if image_response.error:
+    print(f"错误: {image_response.error}")
+else:
+    print(f"图像生成成功: {image_response.content}")
+    if image_response.usage:
+        print(f"使用情况: {image_response.usage}")
 ```

 ### Azure OpenAI 调用示例
@@ -488,6 +519,78 @@ async def batch_example():
 asyncio.run(batch_example())
 ```

+### 图像生成调用示例
+
+支持 OpenAI DALL-E、Google Vertex AI 和 Google GenAI 图像生成:
+
+```python
+from tamar_model_client import TamarModelClient
+from tamar_model_client.schemas import ModelRequest, UserContext
+from tamar_model_client.enums import ProviderType, InvokeType, Channel
+
+client = TamarModelClient()
+
+# OpenAI DALL-E 图像生成
+openai_image_request = ModelRequest(
+    provider=ProviderType.OPENAI,
+    channel=Channel.OPENAI,
+    invoke_type=InvokeType.IMAGE_GENERATION,
+    model="dall-e-3",
+    prompt="一只穿着西装的猫在办公室里工作",
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    size="1024x1024",
+    quality="hd",
+    n=1
+)
+
+# Google Vertex AI 图像生成
+vertex_image_request = ModelRequest(
+    provider=ProviderType.GOOGLE,
+    channel=Channel.VERTEXAI,
+    invoke_type=InvokeType.IMAGE_GENERATION,
+    model="imagegeneration@006",
+    prompt="一座美丽的山峰在日出时分",
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    number_of_images=1,
+    aspect_ratio="1:1",
+    safety_filter_level="block_some"
+)
+
+# Google GenAI 图像生成(新增功能)
+genai_image_request = ModelRequest(
+    provider=ProviderType.GOOGLE,
+    channel=Channel.AI_STUDIO,
+    invoke_type=InvokeType.IMAGE_GENERATION_GENAI,  # 新增的调用类型
+    model="imagen-3.0-generate-001",
+    prompt="科幻风格的城市夜景,霓虹灯闪烁",
+    user_context=UserContext(
+        user_id="test_user",
+        org_id="test_org",
+        client_type="python-sdk"
+    ),
+    config=types.GenerateImagesConfig(
+        number_of_images=1,
+        aspect_ratio="16:9"
+    )
+)
+
+# 发送请求
+for request in [openai_image_request, vertex_image_request, genai_image_request]:
+    response = client.invoke(request)
+    if response.error:
+        print(f"图像生成失败: {response.error}")
+    else:
+        print(f"图像生成成功: {response.content}")
+```
+
 ### 文件输入示例

 支持处理图像等文件输入(需使用支持多模态的模型,如 gemini-2.0-flash):
@@ -7,32 +7,32 @@ tamar_model_client/exceptions.py,sha256=EOr4JMYI7hVszRvNYJ1JqsUNpVmd16T2KpJ0MkFT
 tamar_model_client/json_formatter.py,sha256=XT8XPMKKM2M22tuYR2e1rvWHcpz3UD9iLLgGPsGOjCI,2410
 tamar_model_client/logging_icons.py,sha256=MRTZ1Xvkep9ce_jdltj54_XZUXvIpQ95soRNmLdJ4qw,1837
 tamar_model_client/sync_client.py,sha256=FbyjuyDRiXklSS_l5h5fwNxvABI-hpLGiIWAXqhPHoI,53760
-tamar_model_client/utils.py,sha256=Kn6pFz9GEC96H4eejEax66AkzvsrXI3WCSDtgDjnVTI,5238
+tamar_model_client/utils.py,sha256=9gJm71UuQhyyBCgo6gvMjv74xepOlw6AiwuSzea2CL0,5595
 tamar_model_client/core/__init__.py,sha256=RMiZjV1S4csWPLxB_JfdOea8fYPz97Oj3humQSBw1OI,1054
 tamar_model_client/core/base_client.py,sha256=spb4zjDuPczqnXNlDcIq_bDQ09TOpxeeuX7IxpTS_38,13859
 tamar_model_client/core/http_fallback.py,sha256=2N7-N_TZrtffDjuv9s3-CD8Xy7qw9AuI5xeWGUnGQ0w,22217
 tamar_model_client/core/logging_setup.py,sha256=-MXzTR4Ax50H16cbq1jCXbxgayf5fZ0U3o0--fMmxD8,6692
-tamar_model_client/core/request_builder.py,sha256=yi8iy2Ps2m4d1YwIFiQLRxTvxQxgEGV576aXnNYRl7E,8507
+tamar_model_client/core/request_builder.py,sha256=aplTEXGgeipn-dRCdUptHYWkT9c4zjKmbmI8Ckbv_sM,8516
 tamar_model_client/core/request_id_manager.py,sha256=S-Mliaby9zN_bx-B85FvVnttal-w0skkjy2ZvWoQ5vw,3689
 tamar_model_client/core/response_handler.py,sha256=_q5galAT0_RaUT5C_yZsjg-9VnT9CBjmIASOt28BUmQ,4616
 tamar_model_client/core/utils.py,sha256=AcbsGfNQEaZLYI4OZJs-BdmJgxAoLUC5LFoiYmji820,5875
 tamar_model_client/enums/__init__.py,sha256=3cYYn8ztNGBa_pI_5JGRVYf2QX8fkBVWdjID1PLvoBQ,182
 tamar_model_client/enums/channel.py,sha256=wCzX579nNpTtwzGeS6S3Ls0UzVAgsOlfy4fXMzQTCAw,199
-tamar_model_client/enums/invoke.py,sha256=Up87myAg4-0SDJV5a82ggPDpYHSLEtIco8BF_5Ph1nY,322
+tamar_model_client/enums/invoke.py,sha256=4GLwmUnX9Arwvu-lfFBtYr7aLTB7lZgTQvZEJKUGqY4,402
 tamar_model_client/enums/providers.py,sha256=L_bX75K6KnWURoFizoitZ1Ybza7bmYDqXecNzNpgIrI,165
 tamar_model_client/generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tamar_model_client/generated/model_service_pb2.py,sha256=RI6wNSmgmylzWPedFfPxx938UzS7kcPR58YTzYshcL8,3066
 tamar_model_client/generated/model_service_pb2_grpc.py,sha256=k4tIbp3XBxdyuOVR18Ung_4SUryONB51UYf_uUEl6V4,5145
 tamar_model_client/schemas/__init__.py,sha256=AxuI-TcvA4OMTj2FtK4wAItvz9LrK_293pu3cmMLE7k,394
-tamar_model_client/schemas/inputs.py,sha256=ilSZxcnXubX-yndz6X7y_mZnx19f5IGmWoKPSm0L_R8,17069
+tamar_model_client/schemas/inputs.py,sha256=5ZjuWhlIY_geFGL6cnqk2v1ZQ7algyRD2jJ6NiShVt4,17324
 tamar_model_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
 tests/__init__.py,sha256=kbmImddLDwdqlkkmkyKtl4bQy_ipe-R8eskpaBylU9w,38
 tests/stream_hanging_analysis.py,sha256=W3W48IhQbNAR6-xvMpoWZvnWOnr56CTaH4-aORNBuD4,14807
 tests/test_circuit_breaker.py,sha256=nhEBnyXFjIYjRWlUdu7Z9PnPq48ypbBK6fxN6deHedw,12172
-tests/test_google_azure_final.py,sha256=Cx2lfnoj48_7pUjpCYbrx6OLJF4cI79McV24_EYt_8s,55093
+tests/test_google_azure_final.py,sha256=YnTl5_e-C_lay2e0_0z35rpIxRkLs1tTsvcQ3Qww_RU,67077
 tests/test_logging_issue.py,sha256=JTMbotfHpAEPMBj73pOwxPn-Zn4QVQJX6scMz48FRDQ,2427
 tests/test_simple.py,sha256=Xf0U-J9_xn_LzUsmYu06suK0_7DrPeko8OHoHldsNxE,7169
-tamar_model_client-0.2.4.dist-info/METADATA,sha256=YN-OEy64kB_c1f9sTYRxciIdbn5KSAIeRXECpR7xKLU,41309
-tamar_model_client-0.2.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-tamar_model_client-0.2.4.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
-tamar_model_client-0.2.4.dist-info/RECORD,,
+tamar_model_client-0.2.5.dist-info/METADATA,sha256=d_AzgzHtqfzXlVVHLZU88m3LigTm1oE0Jc_8NIYPggQ,44621
+tamar_model_client-0.2.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tamar_model_client-0.2.5.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
+tamar_model_client-0.2.5.dist-info/RECORD,,
@@ -35,6 +35,7 @@ try:
     from tamar_model_client import TamarModelClient, AsyncTamarModelClient
     from tamar_model_client.schemas import ModelRequest, UserContext
     from tamar_model_client.enums import ProviderType, InvokeType, Channel
+    from google.genai import types

     # 为了调试,临时启用 SDK 的日志输出
     # 注意:这会输出 JSON 格式的日志
@@ -145,6 +146,70 @@ def test_azure_openai():
         print(f"❌ Azure OpenAI 失败: {str(e)}")


+def test_google_genai_image_generation():
+    """测试 Google GenAI 图像生成"""
+    print("\n🎨 测试 Google GenAI 图像生成...")
+
+    try:
+        client = TamarModelClient()
+
+        request = ModelRequest(
+            provider=ProviderType.GOOGLE,
+            channel=Channel.AI_STUDIO,
+            invoke_type=InvokeType.IMAGE_GENERATION_GENAI,
+            model="imagen-3.0-generate-002",
+            prompt="一只可爱的小猫咪在花园里玩耍,阳光透过树叶洒下斑驳的光影",
+            user_context=UserContext(
+                user_id="test_user",
+                org_id="test_org",
+                client_type="test_client"
+            )
+        )
+
+        response = client.invoke(request)
+        print(f"✅ Google GenAI 图像生成成功")
+        print(f"   响应类型: {type(response)}")
+        if response.content:
+            print(f"   图像内容长度: {len(str(response.content))}")
+        else:
+            print(f"   响应内容: {str(response)[:200]}...")
+
+    except Exception as e:
+        print(f"❌ Google GenAI 图像生成失败: {str(e)}")
+
+
+def test_google_vertex_ai_image_generation():
+    """测试 Google Vertex AI 图像生成 (对比)"""
+    print("\n🎨 测试 Google Vertex AI 图像生成...")
+
+    try:
+        client = TamarModelClient()
+
+        request = ModelRequest(
+            provider=ProviderType.GOOGLE,
+            channel=Channel.VERTEXAI,
+            invoke_type=InvokeType.IMAGE_GENERATION,
+            model="imagegeneration@006",
+            prompt="一座雄伟的雪山在黄昏时分",
+            user_context=UserContext(
+                user_id="test_user",
+                org_id="test_org",
+                client_type="test_client"
+            )
+        )
+
+        response = client.invoke(request)
+        print(f"✅ Google Vertex AI 图像生成成功")
+        print(f"   响应类型: {type(response)}")
+        if response.content:
+            print(f"   图像内容长度: {len(str(response.content))}")
+        else:
+            print(f"   响应内容: {str(response)[:200]}...")
+
+    except Exception as e:
+        print(f"❌ Google Vertex AI 图像生成失败: {str(e)}")
+
+
 async def test_google_streaming():
     """测试 Google 流式响应"""
     print("\n📡 测试 Google 流式响应...")
@@ -223,6 +288,78 @@ async def test_azure_streaming():
         print(f"❌ Azure 流式响应失败: {str(e)}")


+async def test_google_genai_image_generation_async():
+    """测试异步 Google GenAI 图像生成"""
+    print("\n🎨 测试异步 Google GenAI 图像生成...")
+
+    try:
+        async with AsyncTamarModelClient() as client:
+            request = ModelRequest(
+                provider=ProviderType.GOOGLE,
+                channel=Channel.AI_STUDIO,
+                invoke_type=InvokeType.IMAGE_GENERATION_GENAI,
+                model="imagen-3.0-generate-002",
+                prompt="现代城市夜景,霓虹灯闪烁,繁华热闹的街道",
+                user_context=UserContext(
+                    user_id="test_user_async",
+                    org_id="test_org",
+                    client_type="test_client_async"
+                )
+            )
+
+            response = await client.invoke(request)
+            print(f"✅ 异步 Google GenAI 图像生成调用成功")
+            print(f"   响应类型: {type(response)}")
+
+            # 对于图像生成,是直接的响应对象,不是流式
+            if response.content:
+                print(f"   图像内容长度: {len(str(response.content))}")
+                print(f"   图像生成成功!")
+            elif response.error:
+                print(f"   错误: {response.error}")
+            else:
+                print(f"   响应内容: {str(response)[:200]}...")
+
+    except Exception as e:
+        print(f"❌ 异步 Google GenAI 图像生成失败: {str(e)}")
+
+
+async def test_google_vertex_ai_image_generation_async():
+    """测试异步 Google Vertex AI 图像生成 (对比)"""
+    print("\n🎨 测试异步 Google Vertex AI 图像生成...")
+
+    try:
+        async with AsyncTamarModelClient() as client:
+            request = ModelRequest(
+                provider=ProviderType.GOOGLE,
+                channel=Channel.VERTEXAI,
+                invoke_type=InvokeType.IMAGE_GENERATION,
+                model="imagegeneration@006",
+                prompt="宁静的湖泊倒映着夕阳,周围环绕着青山绿树",
+                user_context=UserContext(
+                    user_id="test_user_async",
+                    org_id="test_org",
+                    client_type="test_client_async"
+                )
+            )
+
+            response = await client.invoke(request)
+            print(f"✅ 异步 Google Vertex AI 图像生成调用成功")
+            print(f"   响应类型: {type(response)}")
+
+            # 对于图像生成,是直接的响应对象,不是流式
+            if response.content:
+                print(f"   图像内容长度: {len(str(response.content))}")
+                print(f"   图像生成成功!")
+            elif response.error:
+                print(f"   错误: {response.error}")
+            else:
+                print(f"   响应内容: {str(response)[:200]}...")
+
+    except Exception as e:
+        print(f"❌ 异步 Google Vertex AI 图像生成失败: {str(e)}")
+
+
 def test_sync_batch_requests():
     """测试同步批量请求"""
     print("\n📦 测试同步批量请求...")
@@ -332,6 +469,19 @@ async def test_batch_requests():
                     ],
                     custom_id="google-vertex-ai-1",
                 ),
+                # Google GenAI 图像生成请求
+                BatchModelRequestItem(
+                    provider=ProviderType.GOOGLE,
+                    channel=Channel.AI_STUDIO,
+                    invoke_type=InvokeType.IMAGE_GENERATION_GENAI,
+                    model="imagen-3.0-generate-001",
+                    prompt="一朵美丽的玫瑰花在阳光下绽放",
+                    config=types.GenerateImagesConfig(
+                        number_of_images=1,
+                        aspect_ratio="1:1"
+                    ),
+                    custom_id="google-genai-image-1",
+                ),
                 # Azure OpenAI 请求
                 BatchModelRequestItem(
                     provider=ProviderType.AZURE,
@@ -378,6 +528,120 @@ async def test_batch_requests():
         print(f"❌ 批量请求失败: {str(e)}")


+async def test_image_generation_batch():
+    """测试图像生成批量请求 - 同时测试 GenAI、Vertex AI 图像生成"""
+    print("\n🖼️ 测试图像生成批量请求...")
+
+    try:
+        from tamar_model_client.schemas import BatchModelRequest, BatchModelRequestItem
+
+        async with AsyncTamarModelClient() as client:
+            # 构建图像生成批量请求
+            batch_request = BatchModelRequest(
+                user_context=UserContext(
+                    user_id="test_image_batch",
+                    org_id="test_org",
+                    client_type="image_test_client"
+                ),
+                items=[
+                    # Google GenAI 图像生成请求 1
+                    BatchModelRequestItem(
+                        provider=ProviderType.GOOGLE,
+                        channel=Channel.AI_STUDIO,
+                        invoke_type=InvokeType.IMAGE_GENERATION_GENAI,
+                        model="imagen-3.0-generate-001",
+                        prompt="一只可爱的小狗在公园里奔跑",
+                        config=types.GenerateImagesConfig(
+                            number_of_images=1,
+                            aspect_ratio="1:1",
+                            safety_filter_level="block_some"
+                        ),
+                        custom_id="genai-dog-1",
+                    ),
+                    # Google GenAI 图像生成请求 2
+                    BatchModelRequestItem(
+                        provider=ProviderType.GOOGLE,
+                        channel=Channel.AI_STUDIO,
+                        invoke_type=InvokeType.IMAGE_GENERATION_GENAI,
+                        model="imagen-3.0-generate-001",
+                        prompt="美丽的樱花盛开在春天的公园里",
+                        config=types.GenerateImagesConfig(
+                            number_of_images=1,
+                            aspect_ratio="16:9"
+                        ),
+                        custom_id="genai-sakura-1",
+                    ),
+                    # Google Vertex AI 图像生成请求 1
+                    BatchModelRequestItem(
+                        provider=ProviderType.GOOGLE,
+                        channel=Channel.VERTEXAI,
+                        invoke_type=InvokeType.IMAGE_GENERATION,
+                        model="imagegeneration@006",
+                        prompt="壮丽的山峦在夕阳西下时的景色",
+                        number_of_images=1,
+                        aspect_ratio="16:9",
+                        safety_filter_level="block_some",
+                        custom_id="vertex-mountain-1",
+                    ),
+                    # Google Vertex AI 图像生成请求 2
+                    BatchModelRequestItem(
+                        provider=ProviderType.GOOGLE,
+                        channel=Channel.VERTEXAI,
+                        invoke_type=InvokeType.IMAGE_GENERATION,
+                        model="imagegeneration@006",
+                        prompt="宁静的海滩上有椰子树和海浪",
+                        number_of_images=1,
+                        aspect_ratio="1:1",
+                        safety_filter_level="block_some",
+                        custom_id="vertex-beach-1",
+                    )
+                ]
+            )
+
+            # 执行批量图像生成请求
+            print(f"   发送批量图像生成请求 (共{len(batch_request.items)}个)...")
+            batch_response = await client.invoke_batch(batch_request)
+
+            print(f"✅ 批量图像生成请求成功")
+            print(f"   请求数量: {len(batch_request.items)}")
+            print(f"   响应数量: {len(batch_response.responses)}")
+            print(f"   批量请求ID: {batch_response.request_id}")
+
+            # 详细显示每个图像生成结果
+            genai_success = 0
+            vertex_success = 0
+            total_errors = 0
+
+            for i, response in enumerate(batch_response.responses):
+                print(f"\n   图像生成 {i + 1}:")
+                print(f"   - custom_id: {response.custom_id}")
+                print(f"   - 有错误: {'是' if response.error else '否'}")
+
+                if response.error:
+                    total_errors += 1
+                    print(f"   - 错误信息: {response.error}")
+                else:
+                    if response.content:
+                        print(f"   - 图像内容长度: {len(str(response.content))}")
+                        print(f"   - ✅ 图像生成成功!")
+
+                        # 统计不同类型的成功数
+                        if "genai" in response.custom_id:
+                            genai_success += 1
+                        elif "vertex" in response.custom_id:
+                            vertex_success += 1
+                    else:
+                        print(f"   - 响应预览: {str(response)[:100]}...")
+
+            print(f"\n📊 图像生成批量测试统计:")
+            print(f"   - GenAI 图像生成成功: {genai_success}/2")
+            print(f"   - Vertex AI 图像生成成功: {vertex_success}/2")
+            print(f"   - 总错误数: {total_errors}")
+
+    except Exception as e:
+        print(f"❌ 批量图像生成请求失败: {str(e)}")
+
+
 def test_concurrent_requests(num_requests: int = 150):
     """测试并发请求

@@ -513,7 +777,7 @@ async def test_async_concurrent_requests(num_requests: int = 150):
     failed_requests = 0
     request_times: List[float] = []
     errors: Dict[str, int] = {}
-    trace_id = "8885588866668888886666888888866666668888"
+    trace_id = "88888888888888888333333888888883333388888"

     # 异步锁
     stats_lock = asyncio.Lock()
@@ -542,7 +806,7 @@ async def test_async_concurrent_requests(num_requests: int = 150):
                 config={"temperature": 0.1}
             )

-            response = await client.invoke(request, timeout=300000.0)
+            response = await client.invoke(request, timeout=300000.0, request_id=trace_id)
             duration = time.time() - start_time
             return (True, duration, "")

@@ -635,26 +899,26 @@ async def test_async_batch_with_circuit_breaker_v2(num_requests: int = 10):
     num_requests: 要发送的请求数,默认10个
     """
     print(f"\n🔥 测试熔断器功能 - 改进版 ({num_requests} 个独立请求)...")
-
+
     # 保存原始环境变量
     import os
     original_env = {}
-    env_vars = ['MODEL_CLIENT_RESILIENT_ENABLED', 'MODEL_CLIENT_HTTP_FALLBACK_URL',
+    env_vars = ['MODEL_CLIENT_RESILIENT_ENABLED', 'MODEL_CLIENT_HTTP_FALLBACK_URL',
                 'MODEL_CLIENT_CIRCUIT_BREAKER_THRESHOLD', 'MODEL_CLIENT_CIRCUIT_BREAKER_TIMEOUT']
     for var in env_vars:
         original_env[var] = os.environ.get(var)
-
+
     # 设置环境变量以启用熔断器和HTTP fallback
     os.environ['MODEL_CLIENT_RESILIENT_ENABLED'] = 'true'
     os.environ['MODEL_CLIENT_HTTP_FALLBACK_URL'] = 'http://localhost:8000'
     os.environ['MODEL_CLIENT_CIRCUIT_BREAKER_THRESHOLD'] = '3'  # 3次失败后触发熔断
     os.environ['MODEL_CLIENT_CIRCUIT_BREAKER_TIMEOUT'] = '30'  # 熔断器30秒后恢复
-
+
     print(f"   环境变量设置:")
     print(f"   - MODEL_CLIENT_RESILIENT_ENABLED: {os.environ.get('MODEL_CLIENT_RESILIENT_ENABLED')}")
     print(f"   - MODEL_CLIENT_HTTP_FALLBACK_URL: {os.environ.get('MODEL_CLIENT_HTTP_FALLBACK_URL')}")
-    print(f"   - 熔断阈值: 1 次失败")
-
+    print(f"   - 熔断阈值: 3 次失败")
+
     # 统计变量
     total_requests = 0
     successful_requests = 0
@@ -663,24 +927,25 @@ async def test_async_batch_with_circuit_breaker_v2(num_requests: int = 10):
     http_fallback_used = 0
     request_times: List[float] = []
     errors: Dict[str, int] = {}
-
+
     try:
         # 创建一个共享的异步客户端(启用熔断器)
         async with AsyncTamarModelClient() as client:
             print(f"\n   熔断器配置:")
             print(f"   - 启用状态: {getattr(client, 'resilient_enabled', False)}")
             print(f"   - HTTP Fallback URL: {getattr(client, 'http_fallback_url', 'None')}")
-
+
             for i in range(num_requests):
                 start_time = time.time()
-
+
                 try:
                     # 前4个请求使用错误的model来触发失败
-                    if i < 6:
+                    if i < 4:
                         request = ModelRequest(
-                            provider=ProviderType.OPENAI,
+                            provider=ProviderType.GOOGLE,
+                            invoke_type=InvokeType.GENERATION,
                             model="invalid-model-to-trigger-error",  # 无效模型
-                            input=f"测试失败请求 {i + 1}",
+                            contents=f"测试失败请求 {i+1}",
                             user_context=UserContext(
                                 user_id=f"circuit_test_{i}",
                                 org_id="test_org_circuit",
@@ -691,8 +956,9 @@ async def test_async_batch_with_circuit_breaker_v2(num_requests: int = 10):
                         # 后续请求使用正确的model
                         request = ModelRequest(
                             provider=ProviderType.GOOGLE,
+                            invoke_type=InvokeType.GENERATION,
                             model="tamar-google-gemini-flash-lite",
-                            contents=f"测试请求 {i + 1}: 计算 {i} + {i}",
+                            contents=f"测试请求 {i+1}: 计算 {i} + {i}",
                             user_context=UserContext(
                                 user_id=f"circuit_test_{i}",
                                 org_id="test_org_circuit",
@@ -700,17 +966,17 @@ async def test_async_batch_with_circuit_breaker_v2(num_requests: int = 10):
                             ),
                             config={"temperature": 0.1}
                         )
-
-                    print(f"\n   📤 发送请求 {i + 1}/{num_requests}...")
+
+                    print(f"\n   📤 发送请求 {i+1}/{num_requests}...")
                     response = await client.invoke(request, timeout=10000)
-
+
                     duration = time.time() - start_time
                     request_times.append(duration)
                     total_requests += 1
                     successful_requests += 1
-
-                    print(f"   ✅ 请求 {i + 1} 成功 - 耗时: {duration:.2f}秒")
-
+
+                    print(f"   ✅ 请求 {i+1} 成功 - 耗时: {duration:.2f}秒")
+
                     # 检查是否通过HTTP fallback
                     if hasattr(client, 'resilient_enabled') and client.resilient_enabled:
                         try:
@@ -720,19 +986,19 @@ async def test_async_batch_with_circuit_breaker_v2(num_requests: int = 10):
                                 print(f"   (通过HTTP fallback)")
                         except:
                             pass
-
+
                 except Exception as e:
                     duration = time.time() - start_time
                     request_times.append(duration)
                     total_requests += 1
                     failed_requests += 1
-
+
                     error_type = type(e).__name__
                     errors[error_type] = errors.get(error_type, 0) + 1
-
-                    print(f"   ❌ 请求 {i + 1} 失败: {error_type} - {str(e)[:100]}")
+
+                    print(f"   ❌ 请求 {i+1} 失败: {error_type} - {str(e)[:100]}")
                     print(f"      耗时: {duration:.2f}秒")
-
+
                     # 检查熔断器状态
                     if hasattr(client, 'resilient_enabled') and client.resilient_enabled:
                         try:
@@ -740,28 +1006,28 @@ async def test_async_batch_with_circuit_breaker_v2(num_requests: int = 10):
                             if metrics and 'circuit_breaker' in metrics:
                                 state = metrics['circuit_breaker']['state']
                                 failures = metrics['circuit_breaker']['failure_count']
-
+
                                 if state == 'open' and not circuit_breaker_opened:
                                     circuit_breaker_opened = True
                                     print(f"   🔻 熔断器已打开!失败次数: {failures}")
-
+
                                 print(f"   熔断器: {state}, 失败计数: {failures}")
                         except Exception as e:
                             print(f"   获取熔断器状态失败: {e}")
-
+
                 # 请求之间短暂等待
                 await asyncio.sleep(0.2)
-
+
             # 最终统计
             print(f"\n📊 熔断器测试结果:")
             print(f"   总请求数: {total_requests}")
             print(f"   成功请求: {successful_requests}")
             print(f"   失败请求: {failed_requests}")
-
+
             print(f"\n   🔥 熔断器统计:")
             print(f"   - 熔断器是否触发: {'是' if circuit_breaker_opened else '否'}")
             print(f"   - HTTP fallback使用次数: {http_fallback_used}")
-
+
             # 获取最终状态
             if hasattr(client, 'resilient_enabled') and client.resilient_enabled:
                 try:
@@ -771,17 +1037,17 @@ async def test_async_batch_with_circuit_breaker_v2(num_requests: int = 10):
                     print(f"   - 总失败次数: {final_metrics['circuit_breaker']['failure_count']}")
                 except Exception as e:
                     print(f"   - 获取最终状态失败: {e}")
-
+
             if errors:
                 print(f"\n   错误统计:")
                 for error_type, count in sorted(errors.items(), key=lambda x: x[1], reverse=True):
                     print(f"   - {error_type}: {count} 次")
-
+
     except Exception as e:
         print(f"❌ 测试失败: {str(e)}")
         import traceback
         traceback.print_exc()
-
+
     finally:
         # 恢复原始环境变量
         for var, value in original_env.items():
@@ -1205,34 +1471,36 @@ async def main():

     try:
         # 同步测试
-        test_google_ai_studio()
-        test_google_vertex_ai()
-        test_azure_openai()
-
+        # test_google_ai_studio()
+        # test_google_vertex_ai()
+        # test_azure_openai()
+
+        # 新增:图像生成测试
+        # test_google_genai_image_generation()
+        # test_google_vertex_ai_image_generation()
+
         # 同步批量测试
-        test_sync_batch_requests()
+        # test_sync_batch_requests()

         # 异步流式测试
-        await asyncio.wait_for(test_google_streaming(), timeout=60.0)
-        await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
-
-        # 异步批量测试
-        await asyncio.wait_for(test_batch_requests(), timeout=120.0)
+        # await asyncio.wait_for(test_google_streaming(), timeout=60.0)
+        # await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
+
+        # 新增:异步图像生成测试
+        # await asyncio.wait_for(test_google_genai_image_generation_async(), timeout=120.0)
+        # await asyncio.wait_for(test_google_vertex_ai_image_generation_async(), timeout=120.0)
+        #
+        # # 异步批量测试
+        # await asyncio.wait_for(test_batch_requests(), timeout=120.0)
+        #
+        # # 新增:图像生成批量测试
+        # await asyncio.wait_for(test_image_generation_batch(), timeout=180.0)

         # 同步并发测试
-        test_concurrent_requests(2)  # 测试150个并发请求
-
+        # test_concurrent_requests(2)  # 测试150个并发请求
+        #
         # # 异步并发测试
-        await test_async_concurrent_requests(2)  # 测试50个异步并发请求(复用连接)
-
-        # 异步并发测试 - 独立客户端模式
-        # await test_async_concurrent_requests_independent_clients(30)  # 测试30个独立客户端并发请求
-
-        # 异步批量测试 - 熔断器模式(原版)
-        # await test_async_batch_with_circuit_breaker(10, 5)  # 测试5个批量请求,每批10个请求
-
-        # 熔断器测试 - 改进版(使用单个请求)
-        # await test_async_batch_with_circuit_breaker_v2(10)  # 测试10个独立请求触发熔断
+        # await test_async_concurrent_requests(2)  # 测试50个异步并发请求(复用连接)

         print("\n✅ 测试完成")