tamar-model-client 0.2.5__tar.gz → 0.2.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/PKG-INFO +3 -3
  2. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/setup.py +3 -3
  3. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/__init__.py +2 -0
  4. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/async_client.py +1 -1
  5. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/core/base_client.py +4 -0
  6. tamar_model_client-0.2.7/tamar_model_client/core/response_handler.py +219 -0
  7. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/schemas/__init__.py +2 -1
  8. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/schemas/inputs.py +8 -4
  9. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/schemas/outputs.py +19 -0
  10. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/sync_client.py +1 -1
  11. tamar_model_client-0.2.7/tamar_model_client/tool_call_helper.py +169 -0
  12. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client.egg-info/PKG-INFO +3 -3
  13. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client.egg-info/SOURCES.txt +3 -1
  14. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client.egg-info/requires.txt +2 -2
  15. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tests/test_google_azure_final.py +21 -21
  16. tamar_model_client-0.2.7/tests/test_tool_call_enhancement.py +571 -0
  17. tamar_model_client-0.2.5/tamar_model_client/core/response_handler.py +0 -136
  18. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/README.md +0 -0
  19. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/setup.cfg +0 -0
  20. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/auth.py +0 -0
  21. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/circuit_breaker.py +0 -0
  22. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/core/__init__.py +0 -0
  23. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/core/http_fallback.py +0 -0
  24. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/core/logging_setup.py +0 -0
  25. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/core/request_builder.py +0 -0
  26. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/core/request_id_manager.py +0 -0
  27. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/core/utils.py +0 -0
  28. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/enums/__init__.py +0 -0
  29. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/enums/channel.py +0 -0
  30. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/enums/invoke.py +0 -0
  31. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/enums/providers.py +0 -0
  32. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/error_handler.py +0 -0
  33. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/exceptions.py +0 -0
  34. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/generated/__init__.py +0 -0
  35. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/generated/model_service_pb2.py +0 -0
  36. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/generated/model_service_pb2_grpc.py +0 -0
  37. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/json_formatter.py +0 -0
  38. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/logging_icons.py +0 -0
  39. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client/utils.py +0 -0
  40. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client.egg-info/dependency_links.txt +0 -0
  41. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tamar_model_client.egg-info/top_level.txt +0 -0
  42. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tests/__init__.py +0 -0
  43. {tamar_model_client-0.2.5 → tamar_model_client-0.2.7}/tests/test_circuit_breaker.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tamar-model-client
3
- Version: 0.2.5
3
+ Version: 0.2.7
4
4
  Summary: A Python SDK for interacting with the Model Manager gRPC service
5
5
  Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
6
6
  Author: Oscar Ou
@@ -16,8 +16,8 @@ Requires-Dist: grpcio-tools~=1.67.1
16
16
  Requires-Dist: pydantic
17
17
  Requires-Dist: PyJWT
18
18
  Requires-Dist: nest_asyncio
19
- Requires-Dist: openai
20
- Requires-Dist: google-genai
19
+ Requires-Dist: openai>=1.99.3
20
+ Requires-Dist: google-genai>=1.29.0
21
21
  Requires-Dist: requests>=2.25.0
22
22
  Requires-Dist: aiohttp>=3.7.0
23
23
  Dynamic: author
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name="tamar-model-client",
5
- version="0.2.5",
5
+ version="0.2.7",
6
6
  description="A Python SDK for interacting with the Model Manager gRPC service",
7
7
  author="Oscar Ou",
8
8
  author_email="oscar.ou@tamaredge.ai",
@@ -17,8 +17,8 @@ setup(
17
17
  "pydantic",
18
18
  "PyJWT",
19
19
  "nest_asyncio",
20
- "openai",
21
- "google-genai",
20
+ "openai>=1.99.3",
21
+ "google-genai>=1.29.0",
22
22
  "requests>=2.25.0", # HTTP降级功能(同步)
23
23
  "aiohttp>=3.7.0", # HTTP降级功能(异步)
24
24
  ],
@@ -3,6 +3,7 @@ from .async_client import AsyncTamarModelClient
3
3
  from .exceptions import ModelManagerClientError, ConnectionError, ValidationError
4
4
  from .json_formatter import JSONFormatter
5
5
  from . import logging_icons
6
+ from .tool_call_helper import ToolCallHelper
6
7
 
7
8
  __all__ = [
8
9
  "TamarModelClient",
@@ -12,4 +13,5 @@ __all__ = [
12
13
  "ValidationError",
13
14
  "JSONFormatter",
14
15
  "logging_icons",
16
+ "ToolCallHelper",
15
17
  ]
@@ -623,7 +623,7 @@ class AsyncTamarModelClient(BaseClient, AsyncHttpFallbackMixin):
623
623
  origin_request_id
624
624
  )
625
625
  stream_iter = self.stub.Invoke(request, metadata=fresh_metadata, timeout=invoke_timeout).__aiter__()
626
- chunk_timeout = 30.0 # 单个数据块的超时时间
626
+ chunk_timeout = self.stream_chunk_timeout # 单个数据块的超时时间
627
627
 
628
628
  try:
629
629
  while True:
@@ -296,6 +296,10 @@ class BaseClient(ABC):
296
296
  if hasattr(grpc.StatusCode, error_name):
297
297
  self.never_fallback_errors.add(getattr(grpc.StatusCode, error_name))
298
298
 
299
+ # 流式响应单个数据块的超时时间(秒)
300
+ # AI模型生成可能需要更长时间,默认设置为120秒
301
+ self.stream_chunk_timeout = float(os.getenv('MODEL_CLIENT_STREAM_CHUNK_TIMEOUT', '120.0'))
302
+
299
303
  if self.fast_fallback_enabled:
300
304
  self.logger.info(
301
305
  "🚀 Fast fallback enabled",
@@ -0,0 +1,219 @@
1
+ """
2
+ Response handling logic for Tamar Model Client
3
+
4
+ This module provides utilities for processing gRPC responses and
5
+ converting them to client response objects.
6
+ """
7
+
8
+ import json
9
+ from typing import Optional, Dict, Any
10
+
11
+ from ..schemas import ModelResponse, BatchModelResponse
12
+
13
+
14
+ class ResponseHandler:
15
+ """
16
+ 响应处理器
17
+
18
+ 负责将 gRPC 响应转换为客户端响应对象,
19
+ 包括 JSON 解析、错误处理和数据结构转换。
20
+ """
21
+
22
+ @staticmethod
23
+ def build_model_response(grpc_response) -> ModelResponse:
24
+ """
25
+ 从 gRPC 响应构建增强的 ModelResponse 对象
26
+
27
+ 新增功能:
28
+ 1. 自动提取 tool_calls(对标 OpenAI SDK)
29
+ 2. 提取 finish_reason(对标 OpenAI SDK)
30
+ 3. 支持多种 provider 格式转换
31
+
32
+ Args:
33
+ grpc_response: gRPC 服务返回的响应对象
34
+
35
+ Returns:
36
+ ModelResponse: 增强的客户端响应对象
37
+ """
38
+ raw_response = ResponseHandler._parse_json_field(grpc_response.raw_response)
39
+
40
+ # 提取 tool_calls 和 finish_reason
41
+ tool_calls = None
42
+ finish_reason = None
43
+
44
+ if raw_response and isinstance(raw_response, dict):
45
+ # OpenAI/Azure OpenAI 格式
46
+ if 'choices' in raw_response and raw_response['choices']:
47
+ choice = raw_response['choices'][0]
48
+
49
+ # 提取 tool_calls
50
+ if 'message' in choice and 'tool_calls' in choice['message']:
51
+ tool_calls = choice['message']['tool_calls']
52
+
53
+ # 提取 finish_reason
54
+ if 'finish_reason' in choice:
55
+ finish_reason = choice['finish_reason']
56
+
57
+ # Google AI 格式适配
58
+ elif 'candidates' in raw_response and raw_response['candidates']:
59
+ candidate = raw_response['candidates'][0]
60
+
61
+ # Google 格式的 function calls 映射
62
+ if 'content' in candidate and 'parts' in candidate['content']:
63
+ parts = candidate['content']['parts']
64
+ google_tool_calls = []
65
+
66
+ for i, part in enumerate(parts):
67
+ if 'functionCall' in part:
68
+ # 转换为 OpenAI 兼容格式
69
+ function_call = part['functionCall']
70
+ google_tool_calls.append({
71
+ 'id': f"call_{i}_{function_call.get('name', 'unknown')}",
72
+ 'type': 'function',
73
+ 'function': {
74
+ 'name': function_call.get('name', ''),
75
+ 'arguments': json.dumps(function_call.get('args', {}))
76
+ }
77
+ })
78
+
79
+ if google_tool_calls:
80
+ tool_calls = google_tool_calls
81
+
82
+ # Google 的 finish_reason
83
+ if 'finishReason' in candidate:
84
+ # 映射 Google 格式到标准格式
85
+ google_reason = candidate['finishReason']
86
+ finish_reason_mapping = {
87
+ 'STOP': 'stop',
88
+ 'MAX_TOKENS': 'length',
89
+ 'SAFETY': 'content_filter',
90
+ 'RECITATION': 'content_filter'
91
+ }
92
+ finish_reason = finish_reason_mapping.get(google_reason, google_reason.lower())
93
+
94
+ # 如果有工具调用,设置 finish_reason 为 tool_calls
95
+ if tool_calls:
96
+ finish_reason = 'tool_calls'
97
+
98
+ return ModelResponse(
99
+ content=grpc_response.content,
100
+ usage=ResponseHandler._parse_json_field(grpc_response.usage),
101
+ error=grpc_response.error or None,
102
+ raw_response=raw_response,
103
+ request_id=grpc_response.request_id if grpc_response.request_id else None,
104
+ tool_calls=tool_calls,
105
+ finish_reason=finish_reason
106
+ )
107
+
108
+ @staticmethod
109
+ def build_batch_response(grpc_response) -> BatchModelResponse:
110
+ """
111
+ 从 gRPC 批量响应构建 BatchModelResponse 对象
112
+
113
+ Args:
114
+ grpc_response: gRPC 服务返回的批量响应对象
115
+
116
+ Returns:
117
+ BatchModelResponse: 客户端批量响应对象
118
+ """
119
+ responses = []
120
+ for response_item in grpc_response.items:
121
+ model_response = ResponseHandler.build_model_response(response_item)
122
+ responses.append(model_response)
123
+
124
+ return BatchModelResponse(
125
+ responses=responses,
126
+ request_id=grpc_response.request_id if grpc_response.request_id else None
127
+ )
128
+
129
+ @staticmethod
130
+ def _parse_json_field(json_str: Optional[str]) -> Optional[Dict[str, Any]]:
131
+ """
132
+ 安全地解析 JSON 字符串
133
+
134
+ Args:
135
+ json_str: 待解析的 JSON 字符串
136
+
137
+ Returns:
138
+ Optional[Dict[str, Any]]: 解析后的字典,或 None(如果输入为空)
139
+ """
140
+ if not json_str:
141
+ return None
142
+
143
+ try:
144
+ return json.loads(json_str)
145
+ except json.JSONDecodeError:
146
+ # 如果解析失败,返回原始字符串作为错误信息
147
+ return {"error": "JSON parse error", "raw": json_str}
148
+
149
+ @staticmethod
150
+ def build_log_data(
151
+ model_request,
152
+ response: Optional[ModelResponse] = None,
153
+ duration: Optional[float] = None,
154
+ error: Optional[Exception] = None,
155
+ stream_stats: Optional[Dict[str, Any]] = None
156
+ ) -> Dict[str, Any]:
157
+ """
158
+ 构建日志数据
159
+
160
+ 为请求和响应日志构建结构化的数据字典。
161
+
162
+ Args:
163
+ model_request: 原始请求对象
164
+ response: 响应对象(可选)
165
+ duration: 请求持续时间(秒)
166
+ error: 错误对象(可选)
167
+ stream_stats: 流式响应统计信息(可选)
168
+
169
+ Returns:
170
+ Dict[str, Any]: 日志数据字典
171
+ """
172
+ data = {
173
+ "provider": model_request.provider.value,
174
+ "invoke_type": model_request.invoke_type.value,
175
+ "model": getattr(model_request, 'model', None),
176
+ "stream": getattr(model_request, 'stream', False),
177
+ }
178
+
179
+ # 添加用户上下文信息(如果有)
180
+ if hasattr(model_request, 'user_context'):
181
+ data.update({
182
+ "org_id": model_request.user_context.org_id,
183
+ "user_id": model_request.user_context.user_id,
184
+ "client_type": model_request.user_context.client_type
185
+ })
186
+
187
+ # 添加请求中的 tool 信息
188
+ if hasattr(model_request, 'tools') and model_request.tools:
189
+ data["tools_count"] = len(model_request.tools) if isinstance(model_request.tools, list) else 1
190
+ data["has_tools"] = True
191
+
192
+ if hasattr(model_request, 'tool_choice') and model_request.tool_choice:
193
+ data["tool_choice"] = str(model_request.tool_choice)
194
+
195
+ # 添加响应信息
196
+ if response:
197
+ if hasattr(response, 'content') and response.content:
198
+ data["content_length"] = len(response.content)
199
+ if hasattr(response, 'usage'):
200
+ data["usage"] = response.usage
201
+
202
+ # 新增:tool_calls 相关日志
203
+ if hasattr(response, 'tool_calls') and response.tool_calls:
204
+ data["tool_calls_count"] = len(response.tool_calls)
205
+ data["has_tool_calls"] = True
206
+
207
+ if hasattr(response, 'finish_reason') and response.finish_reason:
208
+ data["finish_reason"] = response.finish_reason
209
+
210
+ # 添加流式响应统计
211
+ if stream_stats:
212
+ data.update(stream_stats)
213
+
214
+ # 添加错误信息
215
+ if error:
216
+ data["error_type"] = type(error).__name__
217
+ data["error_message"] = str(error)
218
+
219
+ return data
@@ -2,11 +2,12 @@
2
2
  Schema definitions for the API
3
3
  """
4
4
 
5
- from .inputs import UserContext, ModelRequest, BatchModelRequestItem, BatchModelRequest
5
+ from .inputs import UserContext, ModelRequest, BatchModelRequestItem, BatchModelRequest, TamarFileIdInput
6
6
  from .outputs import ModelResponse, BatchModelResponse
7
7
 
8
8
  __all__ = [
9
9
  # Model Inputs
10
+ "TamarFileIdInput",
10
11
  "UserContext",
11
12
  "ModelRequest",
12
13
  "BatchModelRequestItem",
@@ -26,6 +26,10 @@ class UserContext(BaseModel):
26
26
  client_type: str # 客户端类型,这里记录的是哪个服务请求过来的
27
27
 
28
28
 
29
+ class TamarFileIdInput(BaseModel):
30
+ file_id: str
31
+
32
+
29
33
  class GoogleGenAiInput(BaseModel):
30
34
  model: str
31
35
  contents: Union[types.ContentListUnion, types.ContentListUnionDict]
@@ -179,11 +183,11 @@ class OpenAIImagesInput(BaseModel):
179
183
 
180
184
 
181
185
  class OpenAIImagesEditInput(BaseModel):
182
- image: Union[FileTypes, List[FileTypes]]
186
+ image: Union[FileTypes, List[FileTypes], TamarFileIdInput, List[TamarFileIdInput]]
183
187
  prompt: str
184
188
  background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN
185
189
  input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN
186
- mask: FileTypes | NotGiven = NOT_GIVEN
190
+ mask: FileTypes | TamarFileIdInput | NotGiven = NOT_GIVEN
187
191
  model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN
188
192
  n: Optional[int] | NotGiven = NOT_GIVEN
189
193
  output_compression: Optional[int] | NotGiven = NOT_GIVEN
@@ -277,7 +281,7 @@ class ModelRequestInput(BaseRequest):
277
281
  config: Optional[Union[types.GenerateContentConfigOrDict, types.GenerateImagesConfigOrDict]] = None
278
282
 
279
283
  # Images(OpenAI Images / Images Edit / Google Vertex Images 合并)
280
- image: Optional[Union[FileTypes, List[FileTypes]]] = None
284
+ image: Optional[Union[FileTypes, List[FileTypes], TamarFileIdInput, List[TamarFileIdInput]]] = None
281
285
  # background 同名字段合并:Responses 的 bool(后台任务)+ Images 的透明度枚举
282
286
  background: Optional[Union[bool, Literal["transparent", "opaque", "auto"], NotGiven]] = NOT_GIVEN
283
287
  moderation: Optional[Union[Literal["low", "auto"], NotGiven]] = NOT_GIVEN
@@ -285,7 +289,7 @@ class ModelRequestInput(BaseRequest):
285
289
  output_compression: Optional[Union[int, NotGiven]] = NOT_GIVEN
286
290
  output_format: Optional[Union[Literal["png", "jpeg", "webp"], NotGiven]] = NOT_GIVEN
287
291
  partial_images: Optional[Union[int, NotGiven]] = NOT_GIVEN
288
- mask: Union[FileTypes, NotGiven] = NOT_GIVEN
292
+ mask: Union[FileTypes, TamarFileIdInput, NotGiven] = NOT_GIVEN
289
293
  negative_prompt: Optional[str] = None
290
294
  aspect_ratio: Optional[Literal["1:1", "9:16", "16:9", "4:3", "3:4"]] = None
291
295
  guidance_scale: Optional[float] = None
@@ -15,8 +15,27 @@ class BaseResponse(BaseModel):
15
15
 
16
16
 
17
17
  class ModelResponse(BaseResponse):
18
+ """增强的模型响应类,对标 OpenAI SDK 的 Tool Call 支持"""
19
+
18
20
  model_config = ConfigDict(arbitrary_types_allowed=True)
21
+
19
22
  request_id: Optional[str] = None # 请求ID,用于跟踪请求
23
+
24
+ # 新增字段 - 对标 OpenAI SDK
25
+ tool_calls: Optional[List[Dict[str, Any]]] = None
26
+ """Tool calls 列表,对应 OpenAI SDK 的 message.tool_calls"""
27
+
28
+ finish_reason: Optional[str] = None
29
+ """完成原因,对应 OpenAI SDK 的 choice.finish_reason"""
30
+
31
+ # 基础便利方法
32
+ def has_tool_calls(self) -> bool:
33
+ """检查响应是否包含 tool calls
34
+
35
+ Returns:
36
+ bool: 如果包含 tool calls 返回 True
37
+ """
38
+ return bool(self.tool_calls and len(self.tool_calls) > 0)
20
39
 
21
40
 
22
41
  class BatchModelResponse(BaseModel):
@@ -786,7 +786,7 @@ class TamarModelClient(BaseClient, HttpFallbackMixin):
786
786
  fetch_thread.daemon = True
787
787
  fetch_thread.start()
788
788
 
789
- chunk_timeout = 30.0 # 单个数据块的超时时间
789
+ chunk_timeout = self.stream_chunk_timeout # 单个数据块的超时时间
790
790
 
791
791
  while True:
792
792
  # 检查是否有异常
@@ -0,0 +1,169 @@
1
+ """
2
+ Tool Call 实用工具类
3
+
4
+ 提供简化 Tool Call 使用的基础工具方法,减少常见错误和样板代码。
5
+ 注意:本工具类仅提供数据处理便利,不包含自动执行功能。
6
+ """
7
+
8
+ import json
9
+ from typing import List, Dict, Any, Optional
10
+
11
+ from .schemas.outputs import ModelResponse
12
+
13
+
14
+ class ToolCallHelper:
15
+ """Tool Call 实用工具类
16
+
17
+ 提供基础的数据处理方法,对标 OpenAI SDK 的使用体验。
18
+ """
19
+
20
+ @staticmethod
21
+ def create_function_tool(
22
+ name: str,
23
+ description: str,
24
+ parameters: Dict[str, Any],
25
+ strict: Optional[bool] = None
26
+ ) -> Dict[str, Any]:
27
+ """创建函数工具定义(对标 OpenAI SDK 的工具定义格式)
28
+
29
+ Args:
30
+ name: 函数名称
31
+ description: 函数描述
32
+ parameters: 函数参数的 JSON Schema
33
+ strict: 是否启用严格模式(OpenAI Structured Outputs)
34
+
35
+ Returns:
36
+ ChatCompletionToolParam: 工具定义对象
37
+
38
+ Example:
39
+ >>> weather_tool = ToolCallHelper.create_function_tool(
40
+ ... name="get_weather",
41
+ ... description="获取指定城市的天气信息",
42
+ ... parameters={
43
+ ... "type": "object",
44
+ ... "properties": {
45
+ ... "location": {"type": "string", "description": "城市名称"}
46
+ ... },
47
+ ... "required": ["location"]
48
+ ... }
49
+ ... )
50
+ """
51
+ tool_def = {
52
+ "type": "function",
53
+ "function": {
54
+ "name": name,
55
+ "description": description,
56
+ "parameters": parameters
57
+ }
58
+ }
59
+
60
+ if strict is not None:
61
+ tool_def["function"]["strict"] = strict
62
+
63
+ return tool_def
64
+
65
+ @staticmethod
66
+ def create_tool_response_message(
67
+ tool_call_id: str,
68
+ content: str,
69
+ name: Optional[str] = None
70
+ ) -> Dict[str, Any]:
71
+ """创建工具响应消息(对标 OpenAI SDK 的消息格式)
72
+
73
+ Args:
74
+ tool_call_id: 工具调用 ID
75
+ content: 工具执行结果
76
+ name: 工具名称(可选)
77
+
78
+ Returns:
79
+ ChatCompletionMessageParam: 工具响应消息
80
+
81
+ Example:
82
+ >>> tool_message = ToolCallHelper.create_tool_response_message(
83
+ ... tool_call_id="call_123",
84
+ ... content="北京今天晴天,25°C",
85
+ ... name="get_weather"
86
+ ... )
87
+ """
88
+ message = {
89
+ "role": "tool",
90
+ "tool_call_id": tool_call_id,
91
+ "content": content
92
+ }
93
+
94
+ if name:
95
+ message["name"] = name
96
+
97
+ return message
98
+
99
+ @staticmethod
100
+ def parse_function_arguments(tool_call: Dict[str, Any]) -> Dict[str, Any]:
101
+ """安全解析函数参数(解决 OpenAI SDK 需要手动 json.loads 的痛点)
102
+
103
+ Args:
104
+ tool_call: 工具调用对象
105
+
106
+ Returns:
107
+ Dict[str, Any]: 解析后的参数字典
108
+
109
+ Raises:
110
+ ValueError: 不支持的工具类型或参数解析失败
111
+
112
+ Example:
113
+ >>> tool_call = response.tool_calls[0]
114
+ >>> arguments = ToolCallHelper.parse_function_arguments(tool_call)
115
+ >>> print(arguments["location"]) # "北京"
116
+ """
117
+ if tool_call.get("type") != "function":
118
+ raise ValueError(f"不支持的工具类型: {tool_call.get('type')}")
119
+
120
+ function = tool_call.get("function", {})
121
+ arguments_str = function.get("arguments", "{}")
122
+
123
+ try:
124
+ return json.loads(arguments_str)
125
+ except json.JSONDecodeError as e:
126
+ raise ValueError(f"解析工具参数失败: {arguments_str}") from e
127
+
128
+ @staticmethod
129
+ def build_messages_with_tool_response(
130
+ original_messages: List[Dict[str, Any]],
131
+ assistant_message: ModelResponse,
132
+ tool_responses: List[Dict[str, Any]]
133
+ ) -> List[Dict[str, Any]]:
134
+ """构建包含工具响应的消息列表(简化版工具方法)
135
+
136
+ Args:
137
+ original_messages: 原始消息列表
138
+ assistant_message: 包含 tool calls 的助手响应
139
+ tool_responses: 工具响应列表
140
+
141
+ Returns:
142
+ List[Dict[str, Any]]: 新的消息列表
143
+
144
+ Example:
145
+ >>> new_messages = ToolCallHelper.build_messages_with_tool_response(
146
+ ... original_messages=request.messages,
147
+ ... assistant_message=response,
148
+ ... tool_responses=[tool_message]
149
+ ... )
150
+ >>> # 然后开发者手动创建新请求发送
151
+ """
152
+ new_messages = list(original_messages)
153
+
154
+ # 添加助手的响应消息
155
+ assistant_msg = {
156
+ "role": "assistant",
157
+ "content": assistant_message.content or ""
158
+ }
159
+
160
+ # 如果有 tool calls,添加到消息中
161
+ if assistant_message.has_tool_calls():
162
+ assistant_msg["tool_calls"] = assistant_message.tool_calls
163
+
164
+ new_messages.append(assistant_msg)
165
+
166
+ # 添加工具响应消息
167
+ new_messages.extend(tool_responses)
168
+
169
+ return new_messages
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tamar-model-client
3
- Version: 0.2.5
3
+ Version: 0.2.7
4
4
  Summary: A Python SDK for interacting with the Model Manager gRPC service
5
5
  Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
6
6
  Author: Oscar Ou
@@ -16,8 +16,8 @@ Requires-Dist: grpcio-tools~=1.67.1
16
16
  Requires-Dist: pydantic
17
17
  Requires-Dist: PyJWT
18
18
  Requires-Dist: nest_asyncio
19
- Requires-Dist: openai
20
- Requires-Dist: google-genai
19
+ Requires-Dist: openai>=1.99.3
20
+ Requires-Dist: google-genai>=1.29.0
21
21
  Requires-Dist: requests>=2.25.0
22
22
  Requires-Dist: aiohttp>=3.7.0
23
23
  Dynamic: author
@@ -9,6 +9,7 @@ tamar_model_client/exceptions.py
9
9
  tamar_model_client/json_formatter.py
10
10
  tamar_model_client/logging_icons.py
11
11
  tamar_model_client/sync_client.py
12
+ tamar_model_client/tool_call_helper.py
12
13
  tamar_model_client/utils.py
13
14
  tamar_model_client.egg-info/PKG-INFO
14
15
  tamar_model_client.egg-info/SOURCES.txt
@@ -35,4 +36,5 @@ tamar_model_client/schemas/inputs.py
35
36
  tamar_model_client/schemas/outputs.py
36
37
  tests/__init__.py
37
38
  tests/test_circuit_breaker.py
38
- tests/test_google_azure_final.py
39
+ tests/test_google_azure_final.py
40
+ tests/test_tool_call_enhancement.py
@@ -3,7 +3,7 @@ grpcio-tools~=1.67.1
3
3
  pydantic
4
4
  PyJWT
5
5
  nest_asyncio
6
- openai
7
- google-genai
6
+ openai>=1.99.3
7
+ google-genai>=1.29.0
8
8
  requests>=2.25.0
9
9
  aiohttp>=3.7.0
@@ -1471,36 +1471,36 @@ async def main():
1471
1471
 
1472
1472
  try:
1473
1473
  # 同步测试
1474
- # test_google_ai_studio()
1475
- # test_google_vertex_ai()
1476
- # test_azure_openai()
1474
+ test_google_ai_studio()
1475
+ test_google_vertex_ai()
1476
+ test_azure_openai()
1477
1477
 
1478
1478
  # 新增:图像生成测试
1479
- # test_google_genai_image_generation()
1480
- # test_google_vertex_ai_image_generation()
1479
+ test_google_genai_image_generation()
1480
+ test_google_vertex_ai_image_generation()
1481
1481
 
1482
1482
  # 同步批量测试
1483
- # test_sync_batch_requests()
1483
+ test_sync_batch_requests()
1484
1484
 
1485
1485
  # 异步流式测试
1486
- # await asyncio.wait_for(test_google_streaming(), timeout=60.0)
1487
- # await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
1486
+ await asyncio.wait_for(test_google_streaming(), timeout=60.0)
1487
+ await asyncio.wait_for(test_azure_streaming(), timeout=60.0)
1488
1488
 
1489
- # 新增:异步图像生成测试
1490
- # await asyncio.wait_for(test_google_genai_image_generation_async(), timeout=120.0)
1491
- # await asyncio.wait_for(test_google_vertex_ai_image_generation_async(), timeout=120.0)
1492
- #
1493
- # # 异步批量测试
1494
- # await asyncio.wait_for(test_batch_requests(), timeout=120.0)
1495
- #
1496
- # # 新增:图像生成批量测试
1497
- # await asyncio.wait_for(test_image_generation_batch(), timeout=180.0)
1489
+ #:异步图像生成测试
1490
+ await asyncio.wait_for(test_google_genai_image_generation_async(), timeout=120.0)
1491
+ await asyncio.wait_for(test_google_vertex_ai_image_generation_async(), timeout=120.0)
1492
+
1493
+ # 异步批量测试
1494
+ await asyncio.wait_for(test_batch_requests(), timeout=120.0)
1495
+
1496
+ # 新增:图像生成批量测试
1497
+ await asyncio.wait_for(test_image_generation_batch(), timeout=180.0)
1498
1498
 
1499
1499
  # 同步并发测试
1500
- # test_concurrent_requests(2) # 测试150个并发请求
1501
- #
1502
- # # 异步并发测试
1503
- # await test_async_concurrent_requests(2) # 测试50个异步并发请求(复用连接)
1500
+ test_concurrent_requests(2) # 测试150个并发请求
1501
+
1502
+ # 异步并发测试
1503
+ await test_async_concurrent_requests(2) # 测试50个异步并发请求(复用连接)
1504
1504
 
1505
1505
  print("\n✅ 测试完成")
1506
1506