tamar-model-client 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- model_manager_client/schemas/inputs.py +3 -3
- tamar_model_client/async_client.py +109 -105
- tamar_model_client/enums/invoke.py +2 -2
- tamar_model_client/schemas/inputs.py +365 -294
- {tamar_model_client-0.1.2.dist-info → tamar_model_client-0.1.4.dist-info}/METADATA +1 -1
- {tamar_model_client-0.1.2.dist-info → tamar_model_client-0.1.4.dist-info}/RECORD +8 -8
- {tamar_model_client-0.1.2.dist-info → tamar_model_client-0.1.4.dist-info}/WHEEL +1 -1
- {tamar_model_client-0.1.2.dist-info → tamar_model_client-0.1.4.dist-info}/top_level.txt +0 -0
model_manager_client/schemas/inputs.py

@@ -105,7 +105,7 @@ class OpenAIChatCompletionsInput(BaseModel):
 class BaseRequest(BaseModel):
     provider: ProviderType  # Provider, e.g. "openai", "google"
     channel: Channel = Channel.NORMAL  # Channel: different providers offer different SDKs; this selects which SDK to call
-    invoke_type: InvokeType = InvokeType.
+    invoke_type: InvokeType = InvokeType.TEXT_GENERATION  # Invocation type: generation = generative model call


 class ModelRequestInput(BaseRequest):
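The 0.1.2 default is cut off in this registry view; 0.1.4 pins it to TEXT_GENERATION, so callers that omit invoke_type get the text-generation path. A minimal sketch of the effect (import paths are inferred from the RECORD listing at the bottom of this diff, so treat them as assumptions):

from model_manager_client.schemas.inputs import BaseRequest
from model_manager_client.enums import ProviderType, InvokeType  # assumed export location

# invoke_type can now be omitted; it falls back to TEXT_GENERATION.
req = BaseRequest(provider=ProviderType.OPENAI)
assert req.invoke_type == InvokeType.TEXT_GENERATION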
@@ -190,7 +190,7 @@ class ModelRequest(ModelRequestInput):
             expected_fields = google_required_fields
             allowed_fields = google_allowed
         elif self.provider == ProviderType.OPENAI or self.provider == ProviderType.AZURE:
-            if self.invoke_type == InvokeType.RESPONSES or self.invoke_type == InvokeType.
+            if self.invoke_type == InvokeType.RESPONSES or self.invoke_type == InvokeType.TEXT_GENERATION:
                 expected_fields = openai_responses_required_fields
                 allowed_fields = openai_responses_allowed
             elif self.invoke_type == InvokeType.CHAT_COMPLETIONS:
@@ -251,7 +251,7 @@ class BatchModelRequestItem(ModelRequestInput):
             expected_fields = google_required_fields
             allowed_fields = google_allowed
         elif self.provider == ProviderType.OPENAI or self.provider == ProviderType.AZURE:
-            if self.invoke_type == InvokeType.RESPONSES or self.invoke_type == InvokeType.
+            if self.invoke_type == InvokeType.RESPONSES or self.invoke_type == InvokeType.TEXT_GENERATION:
                 expected_fields = openai_responses_required_fields
                 allowed_fields = openai_responses_allowed
             elif self.invoke_type == InvokeType.CHAT_COMPLETIONS:
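The same one-line change lands in both validators: for OpenAI/Azure, TEXT_GENERATION now routes to the Responses field sets alongside RESPONSES. A standalone re-implementation of just that routing, for illustration only (the required-field values mirror the constants visible later in this diff):

def required_fields_for(invoke_type: str) -> set:
    # Mirrors the branch above: RESPONSES and TEXT_GENERATION share the
    # Responses-API requirements; CHAT_COMPLETIONS has its own.
    if invoke_type in ("RESPONSES", "TEXT_GENERATION"):
        return {"input", "model"}
    if invoke_type == "CHAT_COMPLETIONS":
        return {"messages", "model"}
    raise ValueError(f"unsupported invoke_type: {invoke_type}")

assert required_fields_for("TEXT_GENERATION") == {"input", "model"}
assert required_fields_for("RESPONSES") == required_fields_for("TEXT_GENERATION")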
tamar_model_client/async_client.py

@@ -1,5 +1,6 @@
 import asyncio
 import atexit
+import base64
 import json
 import logging
 import os
@@ -15,7 +16,8 @@ from .enums import ProviderType, InvokeType
 from .exceptions import ConnectionError, ValidationError
 from .schemas import ModelRequest, ModelResponse, BatchModelRequest, BatchModelResponse
 from .generated import model_service_pb2, model_service_pb2_grpc
-from .schemas.inputs import GoogleGenAiInput, OpenAIResponsesInput, OpenAIChatCompletionsInput
+from .schemas.inputs import GoogleGenAiInput, OpenAIResponsesInput, OpenAIChatCompletionsInput, \
+    GoogleVertexAIImagesInput, OpenAIImagesInput

 if not logging.getLogger().hasHandlers():
     # Configure the logging format
@@ -27,6 +29,72 @@ if not logging.getLogger().hasHandlers():
 logger = logging.getLogger(__name__)


+def is_effective_value(value) -> bool:
+    """
+    Recursively determine whether value is a meaningful, effective value
+    """
+    if value is None or value is NOT_GIVEN:
+        return False
+
+    if isinstance(value, str):
+        return value.strip() != ""
+
+    if isinstance(value, bytes):
+        return len(value) > 0
+
+    if isinstance(value, dict):
+        for v in value.values():
+            if is_effective_value(v):
+                return True
+        return False
+
+    if isinstance(value, list):
+        for item in value:
+            if is_effective_value(item):
+                return True
+        return False
+
+    return True  # Other types (int/float/bool) count as effective as long as they are not None
+
+
+def serialize_value(value):
+    """Recursively process a single value, handling BaseModel, dict, list, and bytes"""
+    if not is_effective_value(value):
+        return None
+    if isinstance(value, BaseModel):
+        return serialize_value(value.model_dump())
+    if hasattr(value, "dict") and callable(value.dict):
+        return serialize_value(value.dict())
+    if isinstance(value, dict):
+        return {k: serialize_value(v) for k, v in value.items()}
+    if isinstance(value, list) or (isinstance(value, Iterable) and not isinstance(value, (str, bytes))):
+        return [serialize_value(v) for v in value]
+    if isinstance(value, bytes):
+        return f"bytes:{base64.b64encode(value).decode('utf-8')}"
+    return value
+
+
+from typing import Any
+
+
+def remove_none_from_dict(data: Any) -> Any:
+    """
+    Walk a dict/list and recursively drop entries whose value is None
+    """
+    if isinstance(data, dict):
+        new_dict = {}
+        for key, value in data.items():
+            if value is None:
+                continue
+            cleaned_value = remove_none_from_dict(value)
+            new_dict[key] = cleaned_value
+        return new_dict
+    elif isinstance(data, list):
+        return [remove_none_from_dict(item) for item in data]
+    else:
+        return data
+
+
 class AsyncTamarModelClient:
     def __init__(
         self,
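These three module-level helpers replace the inline per-field conversion loops removed further down. A quick usage sketch with invented values; since the helpers sit at module level in async_client.py they should be importable as shown, and note how bytes are wrapped as a base64 string with a bytes: prefix so they survive gRPC/JSON transport:

from openai import NOT_GIVEN
from tamar_model_client.async_client import is_effective_value, serialize_value, remove_none_from_dict

payload = {
    "model": "gpt-4o",
    "user": "",                    # ineffective: blank string
    "metadata": {"trace": None},   # ineffective: no effective leaf values
    "stream": NOT_GIVEN,           # ineffective: OpenAI sentinel
    "image": b"\x89PNG",           # bytes -> "bytes:<base64>"
}
assert not is_effective_value({"trace": None})

cleaned = remove_none_from_dict({k: serialize_value(v) for k, v in payload.items()})
assert cleaned == {"model": "gpt-4o", "image": "bytes:iVBORw=="}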
@@ -157,17 +225,22 @@ class AsyncTamarModelClient:

         # Dynamically decide which input fields to use based on provider/invoke_type
         try:
-
-
-
-
+            # Select the field set to validate
+            # Dynamic branching logic
+            match (model_request.provider, model_request.invoke_type):
+                case (ProviderType.GOOGLE, InvokeType.GENERATION):
+                    allowed_fields = GoogleGenAiInput.model_fields.keys()
+                case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION):
+                    allowed_fields = GoogleVertexAIImagesInput.model_fields.keys()
+                case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.RESPONSES | InvokeType.GENERATION):
                     allowed_fields = OpenAIResponsesInput.model_fields.keys()
-
+                case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.CHAT_COMPLETIONS):
                     allowed_fields = OpenAIChatCompletionsInput.model_fields.keys()
-
-
-
-
+                case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
+                    allowed_fields = OpenAIImagesInput.model_fields.keys()
+                case _:
+                    raise ValueError(
+                        f"Unsupported provider/invoke_type combination: {model_request.provider} + {model_request.invoke_type}")

         # Convert the ModelRequest to a dict, keeping only the base + allowed fields
         model_request_dict = model_request.model_dump(exclude_unset=True)
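The removed if/elif ladder becomes a single match on the (provider, invoke_type) pair, with PEP 634 or-patterns letting one case arm cover OpenAI and Azure at once; match/case requires Python 3.10+. A self-contained sketch of the same dispatch shape, using simplified stand-in enums rather than the package's own:

from enum import Enum

class Provider(str, Enum):
    OPENAI = "openai"
    AZURE = "azure"
    GOOGLE = "google"

class Invoke(str, Enum):
    GENERATION = "generation"
    RESPONSES = "responses"
    IMAGE_GENERATION = "image_generation"

def route(provider: Provider, invoke: Invoke) -> str:
    # One case arm per input family; or-patterns collapse equivalent pairs.
    match (provider, invoke):
        case (Provider.GOOGLE, Invoke.GENERATION):
            return "GoogleGenAiInput"
        case ((Provider.OPENAI | Provider.AZURE), (Invoke.RESPONSES | Invoke.GENERATION)):
            return "OpenAIResponsesInput"
        case _:
            raise ValueError(f"Unsupported combination: {provider} + {invoke}")

assert route(Provider.AZURE, Invoke.GENERATION) == "OpenAIResponsesInput"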
@@ -177,51 +250,15 @@ class AsyncTamarModelClient:
             if field in model_request_dict:
                 value = model_request_dict[field]

-                #
-                if
+                # Skip ineffective values
+                if not is_effective_value(value):
                     continue

-                #
-
-
-                #
-
-                    grpc_request_kwargs[field] = value.dict()
-                # If it is a list, also handle elements that are custom objects
-                elif isinstance(value, Iterable) and not isinstance(value, (str, bytes, dict)):
-                    new_list = []
-                    for item in value:
-                        if isinstance(item, BaseModel):
-                            new_list.append(item.model_dump())
-                        elif hasattr(item, "dict") and callable(item.dict):
-                            new_list.append(item.dict())
-                        elif isinstance(item, dict):
-                            # Handle nested dictionaries
-                            nested_dict = {}
-                            for k, v in item.items():
-                                if isinstance(v, BaseModel):
-                                    nested_dict[k] = v.model_dump()
-                                elif hasattr(v, "dict") and callable(v.dict):
-                                    nested_dict[k] = v.dict()
-                                else:
-                                    nested_dict[k] = v
-                            new_list.append(nested_dict)
-                        else:
-                            new_list.append(item)
-                    grpc_request_kwargs[field] = new_list
-                # If it is a dict, handle its inner elements the same way
-                elif isinstance(value, dict):
-                    new_dict = {}
-                    for k, v in value.items():
-                        if isinstance(v, BaseModel):
-                            new_dict[k] = v.model_dump()
-                        elif hasattr(v, "dict") and callable(v.dict):
-                            new_dict[k] = v.dict()
-                        else:
-                            new_dict[k] = v
-                    grpc_request_kwargs[field] = new_dict
-                else:
-                    grpc_request_kwargs[field] = value
+                # Serialize types that gRPC does not support
+                grpc_request_kwargs[field] = serialize_value(value)
+
+        # Clean up grpc_request_kwargs after serialization
+        grpc_request_kwargs = remove_none_from_dict(grpc_request_kwargs)

         request = model_service_pb2.ModelRequestItem(
             provider=model_request.provider.value,
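Condensed, the new per-field flow is: keep only allowed fields, drop ineffective values, serialize the rest, then prune the Nones that serialization produced. A trace with a hypothetical field set (assumes the three helpers above are in scope):

# Hypothetical field set and request dict, to trace the loop above.
allowed_fields = {"model", "messages", "temperature"}
model_request_dict = {
    "model": "gpt-4o",
    "temperature": None,  # dropped: not an effective value
    "messages": [{"role": "user", "content": "hi"}],
}

grpc_request_kwargs = {}
for field in allowed_fields:
    if field in model_request_dict:
        value = model_request_dict[field]
        if not is_effective_value(value):
            continue
        grpc_request_kwargs[field] = serialize_value(value)
grpc_request_kwargs = remove_none_from_dict(grpc_request_kwargs)
# -> {"model": "gpt-4o", "messages": [{"role": "user", "content": "hi"}]}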
@@ -280,17 +317,20 @@ class AsyncTamarModelClient:
         for model_request_item in batch_request_model.items:
             # Dynamically decide which input fields to use based on provider/invoke_type
             try:
-
-
-
-
+                match (model_request_item.provider, model_request_item.invoke_type):
+                    case (ProviderType.GOOGLE, InvokeType.GENERATION):
+                        allowed_fields = GoogleGenAiInput.model_fields.keys()
+                    case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION):
+                        allowed_fields = GoogleVertexAIImagesInput.model_fields.keys()
+                    case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.RESPONSES | InvokeType.GENERATION):
                         allowed_fields = OpenAIResponsesInput.model_fields.keys()
-
+                    case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.CHAT_COMPLETIONS):
                         allowed_fields = OpenAIChatCompletionsInput.model_fields.keys()
-
-
-
-
+                    case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
+                        allowed_fields = OpenAIImagesInput.model_fields.keys()
+                    case _:
+                        raise ValueError(
+                            f"Unsupported provider/invoke_type combination: {model_request_item.provider} + {model_request_item.invoke_type}")

             # Convert the ModelRequest to a dict, keeping only the base + allowed fields
             model_request_dict = model_request_item.model_dump(exclude_unset=True)
@@ -300,51 +340,15 @@ class AsyncTamarModelClient:
                 if field in model_request_dict:
                     value = model_request_dict[field]

-                    #
-                    if
+                    # Skip ineffective values
+                    if not is_effective_value(value):
                         continue

-                    #
-
-
-                    #
-
-                        grpc_request_kwargs[field] = value.dict()
-                    # If it is a list, also handle elements that are custom objects
-                    elif isinstance(value, Iterable) and not isinstance(value, (str, bytes, dict)):
-                        new_list = []
-                        for item in value:
-                            if isinstance(item, BaseModel):
-                                new_list.append(item.model_dump())
-                            elif hasattr(item, "dict") and callable(item.dict):
-                                new_list.append(item.dict())
-                            elif isinstance(item, dict):
-                                # Handle nested dictionaries
-                                nested_dict = {}
-                                for k, v in item.items():
-                                    if isinstance(v, BaseModel):
-                                        nested_dict[k] = v.model_dump()
-                                    elif hasattr(v, "dict") and callable(v.dict):
-                                        nested_dict[k] = v.dict()
-                                    else:
-                                        nested_dict[k] = v
-                                new_list.append(nested_dict)
-                            else:
-                                new_list.append(item)
-                        grpc_request_kwargs[field] = new_list
-                    # If it is a dict, handle its inner elements the same way
-                    elif isinstance(value, dict):
-                        new_dict = {}
-                        for k, v in value.items():
-                            if isinstance(v, BaseModel):
-                                new_dict[k] = v.model_dump()
-                            elif hasattr(v, "dict") and callable(v.dict):
-                                new_dict[k] = v.dict()
-                            else:
-                                new_dict[k] = v
-                        grpc_request_kwargs[field] = new_dict
-                    else:
-                        grpc_request_kwargs[field] = value
+                    # Serialize types that gRPC does not support
+                    grpc_request_kwargs[field] = serialize_value(value)
+
+            # Clean up grpc_request_kwargs after serialization
+            grpc_request_kwargs = remove_none_from_dict(grpc_request_kwargs)

             items.append(model_service_pb2.ModelRequestItem(
                 provider=model_request_item.provider.value,
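The batch path applies the identical cleanup per item. A hedged sketch of building a batch request against the schemas defined in the rewritten inputs.py below (field values invented; the client call itself is omitted since its method name is not shown in this diff):

from tamar_model_client.schemas import BatchModelRequest
from tamar_model_client.schemas.inputs import BatchModelRequestItem, UserContext
from tamar_model_client.enums import ProviderType, InvokeType

batch = BatchModelRequest(
    user_context=UserContext(org_id="org-1", user_id="u-1", client_type="demo"),
    items=[
        # custom_id/priority are optional and left unset here.
        BatchModelRequestItem(
            provider=ProviderType.OPENAI,
            invoke_type=InvokeType.CHAT_COMPLETIONS,
            model="gpt-4o",
            messages=[{"role": "user", "content": "hello"}],
        ),
    ],
)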
tamar_model_client/schemas/inputs.py

@@ -1,294 +1,365 @@
-import httpx
-from google.genai import types
-from openai import NotGiven, NOT_GIVEN
-from openai._types import Headers, Query, Body
-from openai.types import ChatModel, Metadata, ReasoningEffort, ResponsesModel, Reasoning
-from openai.types.chat import ChatCompletionMessageParam, ChatCompletionAudioParam, completion_create_params, \
-    ChatCompletionPredictionContentParam, ChatCompletionStreamOptionsParam, ChatCompletionToolChoiceOptionParam, \
-    ChatCompletionToolParam
-from openai.types.responses import ResponseInputParam, ResponseIncludable, ResponseTextConfigParam, \
-    response_create_params, ToolParam
-from pydantic import BaseModel, model_validator
-from typing import List, Optional, Union, Iterable, Dict, Literal
-
-from tamar_model_client.enums import ProviderType, InvokeType
-from tamar_model_client.enums.channel import Channel
-
-
-class UserContext(BaseModel):
-    org_id: str  # Organization id
-    user_id: str  # User id
-    client_type: str  # Client type: records which service the request came from
-
-
-class GoogleGenAiInput(BaseModel):
-    model: str
-    contents: Union[types.ContentListUnion, types.ContentListUnionDict]
-    config: Optional[types.GenerateContentConfigOrDict] = None
-
-    model_config = {
-        "arbitrary_types_allowed": True
-    }
-
-
-class
[old lines 35-294 were not captured in this registry view; the only surviving fragments are a docstring delimiter, google_required_fields = {"model", "contents"}, and two truncated allowed_fields = assignments]
+import httpx
+from google.genai import types
+from openai import NotGiven, NOT_GIVEN
+from openai._types import Headers, Query, Body
+from openai.types import ChatModel, Metadata, ReasoningEffort, ResponsesModel, Reasoning, ImageModel
+from openai.types.chat import ChatCompletionMessageParam, ChatCompletionAudioParam, completion_create_params, \
+    ChatCompletionPredictionContentParam, ChatCompletionStreamOptionsParam, ChatCompletionToolChoiceOptionParam, \
+    ChatCompletionToolParam
+from openai.types.responses import ResponseInputParam, ResponseIncludable, ResponseTextConfigParam, \
+    response_create_params, ToolParam
+from pydantic import BaseModel, model_validator
+from typing import List, Optional, Union, Iterable, Dict, Literal
+
+from tamar_model_client.enums import ProviderType, InvokeType
+from tamar_model_client.enums.channel import Channel
+
+
+class UserContext(BaseModel):
+    org_id: str  # Organization id
+    user_id: str  # User id
+    client_type: str  # Client type: records which service the request came from
+
+
+class GoogleGenAiInput(BaseModel):
+    model: str
+    contents: Union[types.ContentListUnion, types.ContentListUnionDict]
+    config: Optional[types.GenerateContentConfigOrDict] = None
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
+class GoogleVertexAIImagesInput(BaseModel):
+    model: str
+    prompt: str
+    negative_prompt: Optional[str] = None
+    number_of_images: int = 1
+    aspect_ratio: Optional[Literal["1:1", "9:16", "16:9", "4:3", "3:4"]] = None
+    guidance_scale: Optional[float] = None
+    language: Optional[str] = None
+    seed: Optional[int] = None
+    output_gcs_uri: Optional[str] = None
+    add_watermark: Optional[bool] = True
+    safety_filter_level: Optional[
+        Literal["block_most", "block_some", "block_few", "block_fewest"]
+    ] = None
+    person_generation: Optional[
+        Literal["dont_allow", "allow_adult", "allow_all"]
+    ] = None
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
+class OpenAIResponsesInput(BaseModel):
+    input: Union[str, ResponseInputParam]
+    model: ResponsesModel
+    include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN
+    instructions: Optional[str] | NotGiven = NOT_GIVEN
+    max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN
+    metadata: Optional[Metadata] | NotGiven = NOT_GIVEN
+    parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN
+    previous_response_id: Optional[str] | NotGiven = NOT_GIVEN
+    reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN
+    store: Optional[bool] | NotGiven = NOT_GIVEN
+    stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
+    temperature: Optional[float] | NotGiven = NOT_GIVEN
+    text: ResponseTextConfigParam | NotGiven = NOT_GIVEN
+    tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN
+    tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN
+    top_p: Optional[float] | NotGiven = NOT_GIVEN
+    truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN
+    user: str | NotGiven = NOT_GIVEN
+    extra_headers: Headers | None = None
+    extra_query: Query | None = None
+    extra_body: Body | None = None
+    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
+class OpenAIChatCompletionsInput(BaseModel):
+    messages: Iterable[ChatCompletionMessageParam]
+    model: Union[str, ChatModel]
+    audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN
+    frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN
+    function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN
+    functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN
+    logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN
+    logprobs: Optional[bool] | NotGiven = NOT_GIVEN
+    max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN
+    max_tokens: Optional[int] | NotGiven = NOT_GIVEN
+    metadata: Optional[Metadata] | NotGiven = NOT_GIVEN
+    modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN
+    n: Optional[int] | NotGiven = NOT_GIVEN
+    parallel_tool_calls: bool | NotGiven = NOT_GIVEN
+    prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN
+    presence_penalty: Optional[float] | NotGiven = NOT_GIVEN
+    reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN
+    response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN
+    seed: Optional[int] | NotGiven = NOT_GIVEN
+    service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN
+    stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN
+    store: Optional[bool] | NotGiven = NOT_GIVEN
+    stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
+    stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN
+    temperature: Optional[float] | NotGiven = NOT_GIVEN
+    tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN
+    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN
+    top_logprobs: Optional[int] | NotGiven = NOT_GIVEN
+    top_p: Optional[float] | NotGiven = NOT_GIVEN
+    user: str | NotGiven = NOT_GIVEN
+    web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN
+    extra_headers: Headers | None = None
+    extra_query: Query | None = None
+    extra_body: Body | None = None
+    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
+class OpenAIImagesInput(BaseModel):
+    prompt: str
+    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN
+    n: Optional[int] | NotGiven = NOT_GIVEN
+    quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN
+    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN
+    size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN
+    style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN
+    user: str | NotGiven = NOT_GIVEN
+    extra_headers: Headers | None = None
+    extra_query: Query | None = None
+    extra_body: Body | None = None
+    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
+class BaseRequest(BaseModel):
+    provider: ProviderType  # Provider, e.g. "openai", "google"
+    channel: Channel = Channel.NORMAL  # Channel: different providers offer different SDKs; this selects which SDK to call
+    invoke_type: InvokeType = InvokeType.GENERATION  # Invocation type: generation = generative model call
+
+
+class ModelRequestInput(BaseRequest):
+    # Merged model field
+    model: Optional[Union[str, ResponsesModel, ChatModel, ImageModel]] = None
+
+    # OpenAI Responses Input
+    input: Optional[Union[str, ResponseInputParam]] = None
+    include: Optional[Union[List[ResponseIncludable], NotGiven]] = NOT_GIVEN
+    instructions: Optional[Union[str, NotGiven]] = NOT_GIVEN
+    max_output_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    metadata: Optional[Union[Metadata, NotGiven]] = NOT_GIVEN
+    parallel_tool_calls: Optional[Union[bool, NotGiven]] = NOT_GIVEN
+    previous_response_id: Optional[Union[str, NotGiven]] = NOT_GIVEN
+    reasoning: Optional[Union[Reasoning, NotGiven]] = NOT_GIVEN
+    store: Optional[Union[bool, NotGiven]] = NOT_GIVEN
+    stream: Optional[Union[Literal[False], Literal[True], NotGiven]] = NOT_GIVEN
+    temperature: Optional[Union[float, NotGiven]] = NOT_GIVEN
+    text: Optional[Union[ResponseTextConfigParam, NotGiven]] = NOT_GIVEN
+    tool_choice: Optional[
+        Union[response_create_params.ToolChoice, ChatCompletionToolChoiceOptionParam, NotGiven]
+    ] = NOT_GIVEN
+    tools: Optional[Union[Iterable[ToolParam], Iterable[ChatCompletionToolParam], NotGiven]] = NOT_GIVEN
+    top_p: Optional[Union[float, NotGiven]] = NOT_GIVEN
+    truncation: Optional[Union[Literal["auto", "disabled"], NotGiven]] = NOT_GIVEN
+    user: Optional[Union[str, NotGiven]] = NOT_GIVEN
+
+    extra_headers: Optional[Union[Headers, None]] = None
+    extra_query: Optional[Union[Query, None]] = None
+    extra_body: Optional[Union[Body, None]] = None
+    timeout: Optional[Union[float, httpx.Timeout, None, NotGiven]] = NOT_GIVEN
+
+    # OpenAI Chat Completions Input
+    messages: Optional[Iterable[ChatCompletionMessageParam]] = None
+    audio: Optional[Union[ChatCompletionAudioParam, NotGiven]] = NOT_GIVEN
+    frequency_penalty: Optional[Union[float, NotGiven]] = NOT_GIVEN
+    function_call: Optional[Union[completion_create_params.FunctionCall, NotGiven]] = NOT_GIVEN
+    functions: Optional[Union[Iterable[completion_create_params.Function], NotGiven]] = NOT_GIVEN
+    logit_bias: Optional[Union[Dict[str, int], NotGiven]] = NOT_GIVEN
+    logprobs: Optional[Union[bool, NotGiven]] = NOT_GIVEN
+    max_completion_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    modalities: Optional[Union[List[Literal["text", "audio"]], NotGiven]] = NOT_GIVEN
+    n: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    prediction: Optional[Union[ChatCompletionPredictionContentParam, NotGiven]] = NOT_GIVEN
+    presence_penalty: Optional[Union[float, NotGiven]] = NOT_GIVEN
+    reasoning_effort: Optional[Union[ReasoningEffort, NotGiven]] = NOT_GIVEN
+    response_format: Optional[
+        Union[Literal["url", "b64_json"], completion_create_params.ResponseFormat, NotGiven]] = NOT_GIVEN
+    seed: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    service_tier: Optional[Union[Literal["auto", "default"], NotGiven]] = NOT_GIVEN
+    stop: Optional[Union[Optional[str], List[str], None, NotGiven]] = NOT_GIVEN
+    top_logprobs: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    web_search_options: Optional[Union[completion_create_params.WebSearchOptions, NotGiven]] = NOT_GIVEN
+    stream_options: Optional[Union[ChatCompletionStreamOptionsParam, NotGiven]] = NOT_GIVEN
+
+    # Google GenAI Input
+    contents: Optional[Union[types.ContentListUnion, types.ContentListUnionDict]] = None
+    config: Optional[types.GenerateContentConfigOrDict] = None
+
+    # Fields merged from OpenAIImagesInput + GoogleVertexAIImagesInput
+    prompt: Optional[str] = None
+    negative_prompt: Optional[str] = None
+    aspect_ratio: Optional[Literal["1:1", "9:16", "16:9", "4:3", "3:4"]] = None
+    guidance_scale: Optional[float] = None
+    language: Optional[str] = None
+    output_gcs_uri: Optional[str] = None
+    add_watermark: Optional[bool] = None
+    safety_filter_level: Optional[Literal["block_most", "block_some", "block_few", "block_fewest"]] = None
+    person_generation: Optional[Literal["dont_allow", "allow_adult", "allow_all"]] = None
+    quality: Optional[Literal["standard", "hd"]] | NotGiven = NOT_GIVEN
+    size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN
+    style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN
+    number_of_images: Optional[int] = None  # Google-style usage
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
+class ModelRequest(ModelRequestInput):
+    user_context: UserContext  # User info
+
+    @model_validator(mode="after")
+    def validate_by_provider_and_invoke_type(self) -> "ModelRequest":
+        """Dynamically validate the concrete input-model fields based on provider and invoke_type."""
+        # Dynamically collect the allowed fields
+        base_allowed = {"provider", "channel", "invoke_type", "user_context"}
+        google_allowed = base_allowed | set(GoogleGenAiInput.model_fields.keys())
+        openai_responses_allowed = base_allowed | set(OpenAIResponsesInput.model_fields.keys())
+        openai_chat_allowed = base_allowed | set(OpenAIChatCompletionsInput.model_fields.keys())
+        openai_images_allowed = base_allowed | set(OpenAIImagesInput.model_fields.keys())
+        google_vertexai_images_allowed = base_allowed | set(GoogleVertexAIImagesInput.model_fields.keys())
+
+        # Required fields for each model type
+        google_required_fields = {"model", "contents"}
+        google_vertexai_image_required_fields = {"model", "prompt"}
+
+        openai_responses_required_fields = {"input", "model"}
+        openai_chat_required_fields = {"messages", "model"}
+        openai_image_required_fields = {"prompt"}
+
+        # Select the field set to validate
+        # Dynamic branching logic
+        match (self.provider, self.invoke_type):
+            case (ProviderType.GOOGLE, InvokeType.GENERATION):
+                allowed_fields = google_allowed
+                expected_fields = google_required_fields
+            case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION):
+                allowed_fields = google_vertexai_images_allowed
+                expected_fields = google_vertexai_image_required_fields
+            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.RESPONSES | InvokeType.GENERATION):
+                allowed_fields = openai_responses_allowed
+                expected_fields = openai_responses_required_fields
+            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.CHAT_COMPLETIONS):
+                allowed_fields = openai_chat_allowed
+                expected_fields = openai_chat_required_fields
+            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
+                allowed_fields = openai_images_allowed
+                expected_fields = openai_image_required_fields
+            case _:
+                raise ValueError(f"Unsupported provider/invoke_type combination: {self.provider} + {self.invoke_type}")
+
+        # Check whether any required fields are missing
+        missing = [field for field in expected_fields if getattr(self, field, None) is None]
+        if missing:
+            raise ValueError(
+                f"Missing required fields for provider={self.provider} and invoke_type={self.invoke_type}: {missing}")
+
+        # Check for illegal fields
+        illegal_fields = []
+        valid_fields = {"provider", "channel", "invoke_type"} if self.invoke_type == InvokeType.IMAGE_GENERATION else {
+            "provider", "channel", "invoke_type", "stream"}
+        for name, value in self.__dict__.items():
+            if name in valid_fields:
+                continue
+            if name not in allowed_fields and value is not None and not isinstance(value, NotGiven):
+                illegal_fields.append(name)
+
+        if illegal_fields:
+            raise ValueError(
+                f"Unsupported fields for provider={self.provider} and invoke_type={self.invoke_type}: {illegal_fields}")
+
+        return self
+
+
+class BatchModelRequestItem(ModelRequestInput):
+    custom_id: Optional[str] = None
+    priority: Optional[int] = None  # (Optional, reserved) execution priority within a batch call
+
+    @model_validator(mode="after")
+    def validate_by_provider_and_invoke_type(self) -> "BatchModelRequestItem":
+        """Dynamically validate the concrete input-model fields based on provider and invoke_type."""
+        # Dynamically collect the allowed fields
+        base_allowed = {"provider", "channel", "invoke_type", "user_context"}
+        google_allowed = base_allowed | set(GoogleGenAiInput.model_fields.keys())
+        openai_responses_allowed = base_allowed | set(OpenAIResponsesInput.model_fields.keys())
+        openai_chat_allowed = base_allowed | set(OpenAIChatCompletionsInput.model_fields.keys())
+        openai_images_allowed = base_allowed | set(OpenAIImagesInput.model_fields.keys())
+        google_vertexai_images_allowed = base_allowed | set(GoogleVertexAIImagesInput.model_fields.keys())
+
+        # Required fields for each model type
+        google_required_fields = {"model", "contents"}
+        google_vertexai_image_required_fields = {"model", "prompt"}
+
+        openai_responses_required_fields = {"input", "model"}
+        openai_chat_required_fields = {"messages", "model"}
+        openai_image_required_fields = {"prompt"}
+
+        # Select the field set to validate
+        # Dynamic branching logic
+        match (self.provider, self.invoke_type):
+            case (ProviderType.GOOGLE, InvokeType.GENERATION):
+                allowed_fields = google_allowed
+                expected_fields = google_required_fields
+            case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION):
+                allowed_fields = google_vertexai_images_allowed
+                expected_fields = google_vertexai_image_required_fields
+            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.RESPONSES | InvokeType.GENERATION):
+                allowed_fields = openai_responses_allowed
+                expected_fields = openai_responses_required_fields
+            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.CHAT_COMPLETIONS):
+                allowed_fields = openai_chat_allowed
+                expected_fields = openai_chat_required_fields
+            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
+                allowed_fields = openai_images_allowed
+                expected_fields = openai_image_required_fields
+            case _:
+                raise ValueError(f"Unsupported provider/invoke_type combination: {self.provider} + {self.invoke_type}")
+
+        # Check whether any required fields are missing
+        missing = [field for field in expected_fields if getattr(self, field, None) is None]
+        if missing:
+            raise ValueError(
+                f"Missing required fields for provider={self.provider} and invoke_type={self.invoke_type}: {missing}")
+
+        # Check for illegal fields
+        illegal_fields = []
+        valid_fields = {"provider", "channel", "invoke_type"} if self.invoke_type == InvokeType.IMAGE_GENERATION else {
+            "provider", "channel", "invoke_type", "stream"}
+        for name, value in self.__dict__.items():
+            if name in valid_fields:
+                continue
+            if name not in allowed_fields and value is not None and not isinstance(value, NotGiven):
+                illegal_fields.append(name)
+
+        if illegal_fields:
+            raise ValueError(
+                f"Unsupported fields for provider={self.provider} and invoke_type={self.invoke_type}: {illegal_fields}")
+
+        return self
+
+
+class BatchModelRequest(BaseModel):
+    user_context: UserContext  # User info
+    items: List[BatchModelRequestItem]  # List of batch request items
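With the rewritten schemas, the validator routes each (provider, invoke_type) pair to one input family and rejects fields from the others. A small sketch against the classes above (values invented):

from tamar_model_client.schemas.inputs import ModelRequest, UserContext
from tamar_model_client.enums import ProviderType, InvokeType

# Routed to the OpenAIImagesInput field set; only "prompt" is required there.
req = ModelRequest(
    user_context=UserContext(org_id="org-1", user_id="u-1", client_type="demo"),
    provider=ProviderType.OPENAI,
    invoke_type=InvokeType.IMAGE_GENERATION,
    prompt="a watercolor fox",
    size="1024x1024",
)

# Out-of-family fields are rejected: adding messages=[...] to this request
# raises ValueError("Unsupported fields for provider=... and invoke_type=...").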
{tamar_model_client-0.1.2.dist-info → tamar_model_client-0.1.4.dist-info}/RECORD

@@ -11,24 +11,24 @@ model_manager_client/generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5
 model_manager_client/generated/model_service_pb2.py,sha256=ST84YYQk8x6UtQKIx6HprUxH5uGU4i3LhC8b-lHUQtA,3066
 model_manager_client/generated/model_service_pb2_grpc.py,sha256=BzsINWQeACVnVzLVV0PgieZA25C2-EklMKlA-W50c6Y,5147
 model_manager_client/schemas/__init__.py,sha256=AxuI-TcvA4OMTj2FtK4wAItvz9LrK_293pu3cmMLE7k,394
-model_manager_client/schemas/inputs.py,sha256=
+model_manager_client/schemas/inputs.py,sha256=3HUxnbuyQbuvMz1C46zydFYz-iEvLAUWVzOx7-eKS_I,14338
 model_manager_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
 tamar_model_client/__init__.py,sha256=LMECAuDARWHV1XzH3msoDXcyurS2eihRQmBy26_PUE0,328
-tamar_model_client/async_client.py,sha256=
+tamar_model_client/async_client.py,sha256=UQCvHlijt5VvseQJkXq0CyjKv-oOyzLk2oWLgq85P84,18661
 tamar_model_client/auth.py,sha256=gbwW5Aakeb49PMbmYvrYlVx1mfyn1LEDJ4qQVs-9DA4,438
 tamar_model_client/exceptions.py,sha256=jYU494OU_NeIa4X393V-Y73mTNm0JZ9yZApnlOM9CJQ,332
 tamar_model_client/sync_client.py,sha256=o8b20fQUvtMq1gWax3_dfOpputYT4l9pRTz6cHdB0lg,4006
 tamar_model_client/enums/__init__.py,sha256=3cYYn8ztNGBa_pI_5JGRVYf2QX8fkBVWdjID1PLvoBQ,182
 tamar_model_client/enums/channel.py,sha256=wCzX579nNpTtwzGeS6S3Ls0UzVAgsOlfy4fXMzQTCAw,199
-tamar_model_client/enums/invoke.py,sha256=
+tamar_model_client/enums/invoke.py,sha256=WufImoN_87ZjGyzYitZkhNNFefWJehKfLtyP-DTBYlA,267
 tamar_model_client/enums/providers.py,sha256=L_bX75K6KnWURoFizoitZ1Ybza7bmYDqXecNzNpgIrI,165
 tamar_model_client/generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tamar_model_client/generated/model_service_pb2.py,sha256=ST84YYQk8x6UtQKIx6HprUxH5uGU4i3LhC8b-lHUQtA,3066
 tamar_model_client/generated/model_service_pb2_grpc.py,sha256=KW1UEGUim_kmSi_fCIjeLNVOp8nAPE08DgJOn6PY7PM,5145
 tamar_model_client/schemas/__init__.py,sha256=AxuI-TcvA4OMTj2FtK4wAItvz9LrK_293pu3cmMLE7k,394
-tamar_model_client/schemas/inputs.py,sha256=
+tamar_model_client/schemas/inputs.py,sha256=Y9zzt-RoRklkxxe_3VJbZvPghJ00KUjHtFUmD0pCdHs,18721
 tamar_model_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
-tamar_model_client-0.1.2.dist-info/METADATA,sha256=
-tamar_model_client-0.1.2.dist-info/WHEEL,sha256=
-tamar_model_client-0.1.2.dist-info/top_level.txt,sha256=
-tamar_model_client-0.1.2.dist-info/RECORD,,
+tamar_model_client-0.1.4.dist-info/METADATA,sha256=xW11Mj_6s0Qm4fLg3gKRHoXCW4lcCnhexv-qxigqUTQ,16608
+tamar_model_client-0.1.4.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
+tamar_model_client-0.1.4.dist-info/top_level.txt,sha256=_LfDhPv_fvON0PoZgQuo4M7EjoWtxPRoQOBJziJmip8,19
+tamar_model_client-0.1.4.dist-info/RECORD,,
{tamar_model_client-0.1.2.dist-info → tamar_model_client-0.1.4.dist-info}/top_level.txt

File without changes