tamar-model-client 0.1.16__tar.gz → 0.1.17__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/PKG-INFO +1 -1
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/setup.py +1 -1
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/async_client.py +7 -3
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/enums/invoke.py +2 -1
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/schemas/inputs.py +54 -123
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/sync_client.py +7 -3
- tamar_model_client-0.1.17/tamar_model_client/utils.py +118 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/PKG-INFO +1 -1
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/SOURCES.txt +1 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/README.md +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/setup.cfg +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/__init__.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/auth.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/enums/__init__.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/enums/channel.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/enums/providers.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/exceptions.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/generated/__init__.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/generated/model_service_pb2.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/generated/model_service_pb2_grpc.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/schemas/__init__.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/schemas/outputs.py +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/dependency_links.txt +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/requires.txt +0 -0
- {tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/top_level.txt +0 -0
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/async_client.py
RENAMED

@@ -19,7 +19,7 @@ from .exceptions import ConnectionError
 from .schemas import ModelRequest, ModelResponse, BatchModelRequest, BatchModelResponse
 from .generated import model_service_pb2, model_service_pb2_grpc
 from .schemas.inputs import GoogleGenAiInput, OpenAIResponsesInput, OpenAIChatCompletionsInput, \
-    GoogleVertexAIImagesInput, OpenAIImagesInput
+    GoogleVertexAIImagesInput, OpenAIImagesInput, OpenAIImagesEditInput

 logger = logging.getLogger(__name__)

@@ -346,7 +346,7 @@ class AsyncTamarModelClient:

         # Log request start
         logger.info(
-            f"🔵 Request Start | request_id: {request_id} | provider: {model_request.provider} | invoke_type: {model_request.invoke_type}
+            f"🔵 Request Start | request_id: {request_id} | provider: {model_request.provider} | invoke_type: {model_request.invoke_type}")

         # Decide which input fields to use based on provider/invoke_type
         try:
@@ -363,6 +363,8 @@ class AsyncTamarModelClient:
                     allowed_fields = OpenAIChatCompletionsInput.model_fields.keys()
                 case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
                     allowed_fields = OpenAIImagesInput.model_fields.keys()
+                case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
+                    allowed_fields = OpenAIImagesEditInput.model_fields.keys()
                 case _:
                     raise ValueError(
                         f"Unsupported provider/invoke_type combination: {model_request.provider} + {model_request.invoke_type}")
@@ -443,7 +445,7 @@ class AsyncTamarModelClient:

         # Log request start
         logger.info(
-            f"🔵 Batch Request Start | request_id: {request_id} | batch_size: {len(batch_request_model.items)}
+            f"🔵 Batch Request Start | request_id: {request_id} | batch_size: {len(batch_request_model.items)}")

         # Build the batch request
         items = []
@@ -461,6 +463,8 @@ class AsyncTamarModelClient:
                     allowed_fields = OpenAIChatCompletionsInput.model_fields.keys()
                 case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
                     allowed_fields = OpenAIImagesInput.model_fields.keys()
+                case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
+                    allowed_fields = OpenAIImagesEditInput.model_fields.keys()
                 case _:
                     raise ValueError(
                         f"Unsupported provider/invoke_type combination: {model_request_item.provider} + {model_request_item.invoke_type}")
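Both request paths above gate inputs through the same provider/invoke_type dispatch, and 0.1.17 adds an IMAGE_EDIT_GENERATION arm to it; the clients use the resulting key set to decide which request fields to forward. A minimal sketch of the pattern in isolation (the helper name allowed_fields_for is illustrative, not part of the package):

    from tamar_model_client.enums import ProviderType, InvokeType
    from tamar_model_client.schemas.inputs import OpenAIImagesInput, OpenAIImagesEditInput

    def allowed_fields_for(provider, invoke_type):
        # Mirror of the match statement above: each (provider, invoke_type)
        # pair maps to the field names of one pydantic input model.
        match (provider, invoke_type):
            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
                return OpenAIImagesInput.model_fields.keys()
            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
                return OpenAIImagesEditInput.model_fields.keys()
            case _:
                raise ValueError(f"Unsupported provider/invoke_type combination: {provider} + {invoke_type}")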
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/schemas/inputs.py
RENAMED
@@ -1,18 +1,22 @@
+import mimetypes
+import os
+
 import httpx
 from google.genai import types
 from openai import NotGiven, NOT_GIVEN
-from openai._types import Headers, Query, Body
+from openai._types import Headers, Query, Body, FileTypes
 from openai.types import ChatModel, Metadata, ReasoningEffort, ResponsesModel, Reasoning, ImageModel
 from openai.types.chat import ChatCompletionMessageParam, ChatCompletionAudioParam, completion_create_params, \
     ChatCompletionPredictionContentParam, ChatCompletionStreamOptionsParam, ChatCompletionToolChoiceOptionParam, \
     ChatCompletionToolParam
 from openai.types.responses import ResponseInputParam, ResponseIncludable, ResponseTextConfigParam, \
     response_create_params, ToolParam
-from pydantic import BaseModel, model_validator
-from typing import List, Optional, Union, Iterable, Dict, Literal
+from pydantic import BaseModel, model_validator, field_validator
+from typing import List, Optional, Union, Iterable, Dict, Literal, IO

 from tamar_model_client.enums import ProviderType, InvokeType
 from tamar_model_client.enums.channel import Channel
+from tamar_model_client.utils import convert_file_field, validate_fields_by_provider_and_invoke_type


 class UserContext(BaseModel):
@@ -149,6 +153,29 @@ class OpenAIImagesInput(BaseModel):
     }


+class OpenAIImagesEditInput(BaseModel):
+    image: Union[FileTypes, List[FileTypes]]
+    prompt: str
+    background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN
+    mask: FileTypes | NotGiven = NOT_GIVEN
+    model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN
+    n: Optional[int] | NotGiven = NOT_GIVEN
+    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN
+    response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN
+    size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | NotGiven = NOT_GIVEN
+    user: str | NotGiven = NOT_GIVEN
+    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+    # The extra values given here take precedence over values defined on the client or passed to this method.
+    extra_headers: Headers | None = None
+    extra_query: Query | None = None
+    extra_body: Body | None = None
+    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN
+
+    model_config = {
+        "arbitrary_types_allowed": True
+    }
+
+
 class BaseRequest(BaseModel):
     provider: ProviderType  # Provider, e.g. "openai" or "google"
     channel: Channel = Channel.NORMAL  # Channel: providers expose different SDKs; this selects which SDK to call
@@ -212,8 +239,11 @@ class ModelRequestInput(BaseRequest):
     contents: Optional[Union[types.ContentListUnion, types.ContentListUnionDict]] = None
     config: Optional[types.GenerateContentConfigOrDict] = None

-    # Fields merged from OpenAIImagesInput + GoogleVertexAIImagesInput
+    # Fields merged from OpenAIImagesInput + OpenAIImagesEditInput + GoogleVertexAIImagesInput
+    image: Optional[Union[FileTypes, List[FileTypes]]] = None
     prompt: Optional[str] = None
+    background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN
+    mask: FileTypes | NotGiven = NOT_GIVEN
     negative_prompt: Optional[str] = None
     aspect_ratio: Optional[Literal["1:1", "9:16", "16:9", "4:3", "3:4"]] = None
     guidance_scale: Optional[float] = None
@@ -223,7 +253,8 @@ class ModelRequestInput(BaseRequest):
     safety_filter_level: Optional[Literal["block_most", "block_some", "block_few", "block_fewest"]] = None
     person_generation: Optional[Literal["dont_allow", "allow_adult", "allow_all"]] = None
     quality: Optional[Literal["standard", "hd"]] | NotGiven = NOT_GIVEN
-    size: Optional[Literal[
+    size: Optional[Literal[
+        "auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN
     style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN
     number_of_images: Optional[int] = None  # Google usage

@@ -231,71 +262,26 @@ class ModelRequestInput(BaseRequest):
         "arbitrary_types_allowed": True
     }

+    @field_validator("image", mode="before")
+    @classmethod
+    def validate_image(cls, v):
+        return convert_file_field(v)
+
+    @field_validator("mask", mode="before")
+    @classmethod
+    def validate_mask(cls, v):
+        return convert_file_field(v)
+

 class ModelRequest(ModelRequestInput):
     user_context: UserContext  # User info

     @model_validator(mode="after")
     def validate_by_provider_and_invoke_type(self) -> "ModelRequest":
-
-
-
-
-        openai_responses_allowed = base_allowed | set(OpenAIResponsesInput.model_fields.keys())
-        openai_chat_allowed = base_allowed | set(OpenAIChatCompletionsInput.model_fields.keys())
-        openai_images_allowed = base_allowed | set(OpenAIImagesInput.model_fields.keys())
-        google_vertexai_images_allowed = base_allowed | set(GoogleVertexAIImagesInput.model_fields.keys())
-
-        # Required fields per model type
-        google_required_fields = {"model", "contents"}
-        google_vertexai_image_required_fields = {"model", "prompt"}
-
-        openai_responses_required_fields = {"input", "model"}
-        openai_chat_required_fields = {"messages", "model"}
-        openai_image_required_fields = {"prompt"}
-
-        # Pick the field set to validate
-        # Dispatch dynamically
-        match (self.provider, self.invoke_type):
-            case (ProviderType.GOOGLE, InvokeType.GENERATION):
-                allowed_fields = google_allowed
-                expected_fields = google_required_fields
-            case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION):
-                allowed_fields = google_vertexai_images_allowed
-                expected_fields = google_vertexai_image_required_fields
-            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.RESPONSES | InvokeType.GENERATION):
-                allowed_fields = openai_responses_allowed
-                expected_fields = openai_responses_required_fields
-            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.CHAT_COMPLETIONS):
-                allowed_fields = openai_chat_allowed
-                expected_fields = openai_chat_required_fields
-            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
-                allowed_fields = openai_images_allowed
-                expected_fields = openai_image_required_fields
-            case _:
-                raise ValueError(f"Unsupported provider/invoke_type combination: {self.provider} + {self.invoke_type}")
-
-        # Check for missing required fields
-        missing = [field for field in expected_fields if getattr(self, field, None) is None]
-        if missing:
-            raise ValueError(
-                f"Missing required fields for provider={self.provider} and invoke_type={self.invoke_type}: {missing}")
-
-        # Check for illegal fields
-        illegal_fields = []
-        valid_fields = {"provider", "channel", "invoke_type"} if self.invoke_type == InvokeType.IMAGE_GENERATION else {
-            "provider", "channel", "invoke_type", "stream"}
-        for name, value in self.__dict__.items():
-            if name in valid_fields:
-                continue
-            if name not in allowed_fields and value is not None and not isinstance(value, NotGiven):
-                illegal_fields.append(name)
-
-        if illegal_fields:
-            raise ValueError(
-                f"Unsupported fields for provider={self.provider} and invoke_type={self.invoke_type}: {illegal_fields}")
-
-        return self
+        return validate_fields_by_provider_and_invoke_type(
+            instance=self,
+            extra_allowed_fields={"provider", "channel", "invoke_type", "user_context"},
+        )


 class BatchModelRequestItem(ModelRequestInput):
@@ -304,65 +290,10 @@ class BatchModelRequestItem(ModelRequestInput):

     @model_validator(mode="after")
     def validate_by_provider_and_invoke_type(self) -> "BatchModelRequestItem":
-
-
-
-
-        openai_responses_allowed = base_allowed | set(OpenAIResponsesInput.model_fields.keys())
-        openai_chat_allowed = base_allowed | set(OpenAIChatCompletionsInput.model_fields.keys())
-        openai_images_allowed = base_allowed | set(OpenAIImagesInput.model_fields.keys())
-        google_vertexai_images_allowed = base_allowed | set(GoogleVertexAIImagesInput.model_fields.keys())
-
-        # Required fields per model type
-        google_required_fields = {"model", "contents"}
-        google_vertexai_image_required_fields = {"model", "prompt"}
-
-        openai_responses_required_fields = {"input", "model"}
-        openai_chat_required_fields = {"messages", "model"}
-        openai_image_required_fields = {"prompt"}
-
-        # Pick the field set to validate
-        # Dispatch dynamically
-        match (self.provider, self.invoke_type):
-            case (ProviderType.GOOGLE, InvokeType.GENERATION):
-                allowed_fields = google_allowed
-                expected_fields = google_required_fields
-            case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION):
-                allowed_fields = google_vertexai_images_allowed
-                expected_fields = google_vertexai_image_required_fields
-            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.RESPONSES | InvokeType.GENERATION):
-                allowed_fields = openai_responses_allowed
-                expected_fields = openai_responses_required_fields
-            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.CHAT_COMPLETIONS):
-                allowed_fields = openai_chat_allowed
-                expected_fields = openai_chat_required_fields
-            case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
-                allowed_fields = openai_images_allowed
-                expected_fields = openai_image_required_fields
-            case _:
-                raise ValueError(f"Unsupported provider/invoke_type combination: {self.provider} + {self.invoke_type}")
-
-        # Check for missing required fields
-        missing = [field for field in expected_fields if getattr(self, field, None) is None]
-        if missing:
-            raise ValueError(
-                f"Missing required fields for provider={self.provider} and invoke_type={self.invoke_type}: {missing}")
-
-        # Check for illegal fields
-        illegal_fields = []
-        valid_fields = {"provider", "channel", "invoke_type"} if self.invoke_type == InvokeType.IMAGE_GENERATION else {
-            "provider", "channel", "invoke_type", "stream"}
-        for name, value in self.__dict__.items():
-            if name in valid_fields:
-                continue
-            if name not in allowed_fields and value is not None and not isinstance(value, NotGiven):
-                illegal_fields.append(name)
-
-        if illegal_fields:
-            raise ValueError(
-                f"Unsupported fields for provider={self.provider} and invoke_type={self.invoke_type}: {illegal_fields}")
-
-        return self
+        return validate_fields_by_provider_and_invoke_type(
+            instance=self,
+            extra_allowed_fields={"provider", "channel", "invoke_type", "user_context", "custom_id"},
+        )


 class BatchModelRequest(BaseModel):
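For orientation, a hedged sketch of building the new edit input directly. The field names come from OpenAIImagesEditInput above; the filename and bytes are placeholders, and the (filename, content, content_type) tuple is one of the shapes FileTypes accepts:

    from tamar_model_client.schemas.inputs import OpenAIImagesEditInput

    edit_input = OpenAIImagesEditInput(
        image=("sketch.png", b"<png bytes>", "image/png"),  # FileTypes tuple form
        prompt="Replace the background with a sunset",
        size="1024x1024",
        response_format="b64_json",
    )
    # Unset fields default to NOT_GIVEN, the OpenAI SDK's marker for
    # "omit this parameter".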
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/sync_client.py
RENAMED

@@ -17,7 +17,7 @@ from .exceptions import ConnectionError
 from .generated import model_service_pb2, model_service_pb2_grpc
 from .schemas import BatchModelResponse, ModelResponse
 from .schemas.inputs import GoogleGenAiInput, GoogleVertexAIImagesInput, OpenAIResponsesInput, \
-    OpenAIChatCompletionsInput, OpenAIImagesInput, BatchModelRequest, ModelRequest
+    OpenAIChatCompletionsInput, OpenAIImagesInput, OpenAIImagesEditInput, BatchModelRequest, ModelRequest

 logger = logging.getLogger(__name__)

@@ -299,7 +299,7 @@ class TamarModelClient:

         # Log request start
         logger.info(
-            f"🔵 Request Start |
+            f"🔵 Request Start |provider: {model_request.provider} | invoke_type: {model_request.invoke_type}")

         # Decide which input fields to use based on provider/invoke_type
         try:
@@ -316,6 +316,8 @@ class TamarModelClient:
                     allowed_fields = OpenAIChatCompletionsInput.model_fields.keys()
                 case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
                     allowed_fields = OpenAIImagesInput.model_fields.keys()
+                case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
+                    allowed_fields = OpenAIImagesEditInput.model_fields.keys()
                 case _:
                     raise ValueError(
                         f"Unsupported provider/invoke_type combination: {model_request.provider} + {model_request.invoke_type}")
@@ -395,7 +397,7 @@ class TamarModelClient:

         # Log request start
         logger.info(
-            f"🔵 Batch Request Start |
+            f"🔵 Batch Request Start | batch_size: {len(batch_request_model.items)}")

         # Build the batch request
         items = []
@@ -413,6 +415,8 @@ class TamarModelClient:
                     allowed_fields = OpenAIChatCompletionsInput.model_fields.keys()
                 case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
                     allowed_fields = OpenAIImagesInput.model_fields.keys()
+                case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
+                    allowed_fields = OpenAIImagesEditInput.model_fields.keys()
                 case _:
                     raise ValueError(
                         f"Unsupported provider/invoke_type combination: {model_request_item.provider} + {model_request_item.invoke_type}")
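The sync client mirrors the async dispatch, including the per-item branch applied while looping over batch_request_model.items. A speculative sketch of assembling a mixed batch; BatchModelRequestItem and its custom_id allowance appear in the inputs.py diff above, but the exact constructor arguments your deployment accepts may differ:

    from tamar_model_client.enums import ProviderType, InvokeType
    from tamar_model_client.schemas.inputs import BatchModelRequestItem

    items = [
        # Plain image generation: only "prompt" is required on this branch.
        BatchModelRequestItem(
            provider=ProviderType.OPENAI,
            invoke_type=InvokeType.IMAGE_GENERATION,
            prompt="A watercolor lighthouse",
            custom_id="gen-001",
        ),
        # Image edit: both "image" and "prompt" are required.
        BatchModelRequestItem(
            provider=ProviderType.OPENAI,
            invoke_type=InvokeType.IMAGE_EDIT_GENERATION,
            image=("lighthouse.png", b"<png bytes>", "image/png"),
            prompt="Add seagulls to the sky",
            custom_id="edit-001",
        ),
    ]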
tamar_model_client-0.1.17/tamar_model_client/utils.py
ADDED

@@ -0,0 +1,118 @@
+from openai import NotGiven
+from pydantic import BaseModel
+from typing import Any
+import os, mimetypes
+
+def convert_file_field(value: Any) -> Any:
+    def is_file_like(obj):
+        return hasattr(obj, "read") and callable(obj.read)
+
+    def infer_mimetype(filename: str) -> str:
+        mime, _ = mimetypes.guess_type(filename)
+        return mime or "application/octet-stream"
+
+    def convert_item(item):
+        if is_file_like(item):
+            filename = os.path.basename(getattr(item, "name", "file.png"))
+            content_type = infer_mimetype(filename)
+            content = item.read()
+            if hasattr(item, "seek"):
+                item.seek(0)
+            return (filename, content, content_type)
+        elif isinstance(item, tuple):
+            parts = list(item)
+            if len(parts) > 1:
+                maybe_file = parts[1]
+                if is_file_like(maybe_file):
+                    content = maybe_file.read()
+                    if hasattr(maybe_file, "seek"):
+                        maybe_file.seek(0)
+                    parts[1] = content
+                elif not isinstance(maybe_file, (bytes, bytearray)):
+                    raise ValueError(f"Unsupported second element in tuple: {type(maybe_file)}")
+            if len(parts) == 2:
+                parts.append(infer_mimetype(os.path.basename(parts[0] or "file.png")))
+            return tuple(parts)
+        else:
+            return item
+
+    if value is None:
+        return value
+    elif isinstance(value, list):
+        return [convert_item(v) for v in value]
+    else:
+        return convert_item(value)
+
+
+def validate_fields_by_provider_and_invoke_type(
+        instance: BaseModel,
+        extra_allowed_fields: set[str],
+        extra_required_fields: set[str] = set()
+) -> BaseModel:
+    """
+    Shared field validation: dynamically checks allowed and required fields
+    based on provider and invoke_type. Used by ModelRequest and BatchModelRequestItem.
+    """
+    from tamar_model_client.enums import ProviderType, InvokeType
+    from tamar_model_client.schemas.inputs import GoogleGenAiInput, OpenAIResponsesInput, OpenAIChatCompletionsInput, \
+        OpenAIImagesInput, OpenAIImagesEditInput, GoogleVertexAIImagesInput
+
+    google_allowed = extra_allowed_fields | set(GoogleGenAiInput.model_fields)
+    openai_responses_allowed = extra_allowed_fields | set(OpenAIResponsesInput.model_fields)
+    openai_chat_allowed = extra_allowed_fields | set(OpenAIChatCompletionsInput.model_fields)
+    openai_images_allowed = extra_allowed_fields | set(OpenAIImagesInput.model_fields)
+    openai_images_edit_allowed = extra_allowed_fields | set(OpenAIImagesEditInput.model_fields)
+    google_vertexai_images_allowed = extra_allowed_fields | set(GoogleVertexAIImagesInput.model_fields)
+
+    google_required = {"model", "contents"}
+    google_vertex_required = {"model", "prompt"}
+    openai_resp_required = {"input", "model"}
+    openai_chat_required = {"messages", "model"}
+    openai_img_required = {"prompt"}
+    openai_edit_required = {"image", "prompt"}
+
+    match (instance.provider, instance.invoke_type):
+        case (ProviderType.GOOGLE, InvokeType.GENERATION):
+            allowed = google_allowed
+            required = google_required
+        case (ProviderType.GOOGLE, InvokeType.IMAGE_GENERATION):
+            allowed = google_vertexai_images_allowed
+            required = google_vertex_required
+        case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.RESPONSES | InvokeType.GENERATION):
+            allowed = openai_responses_allowed
+            required = openai_resp_required
+        case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.CHAT_COMPLETIONS):
+            allowed = openai_chat_allowed
+            required = openai_chat_required
+        case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_GENERATION):
+            allowed = openai_images_allowed
+            required = openai_img_required
+        case ((ProviderType.OPENAI | ProviderType.AZURE), InvokeType.IMAGE_EDIT_GENERATION):
+            allowed = openai_images_edit_allowed
+            required = openai_edit_required
+        case _:
+            raise ValueError(f"Unsupported provider/invoke_type: {instance.provider} + {instance.invoke_type}")
+
+    required = required | extra_required_fields
+
+    missing = [f for f in required if getattr(instance, f, None) is None]
+    if missing:
+        raise ValueError(
+            f"Missing required fields for provider={instance.provider} and invoke_type={instance.invoke_type}: {missing}")
+
+    illegal = []
+    valid_fields = {"provider", "channel", "invoke_type"}
+    if getattr(instance, "stream", None) is not None:
+        valid_fields.add("stream")
+
+    for k, v in instance.__dict__.items():
+        if k in valid_fields:
+            continue
+        if k not in allowed and v is not None and not isinstance(v, NotGiven):
+            illegal.append(k)
+
+    if illegal:
+        raise ValueError(
+            f"Unsupported fields for provider={instance.provider} and invoke_type={instance.invoke_type}: {illegal}")
+
+    return instance
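convert_file_field normalizes every accepted file shape toward the (filename, content, content_type) tuple form, inferring the content type from the filename and rewinding file-like objects after reading them. A quick behavioral sketch (the paths are placeholders):

    from tamar_model_client.utils import convert_file_field

    # A 2-tuple gains an inferred content type from its filename.
    print(convert_file_field(("mask.png", b"\x89PNG")))
    # -> ('mask.png', b'\x89PNG', 'image/png')

    # A file-like object becomes (basename, bytes, inferred content type)
    # and is seek(0)-rewound so it stays readable afterwards.
    with open("sketch.png", "rb") as f:
        print(convert_file_field(f))

    # None passes through untouched; lists are converted element-wise.
    print(convert_file_field(None))  # -> None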
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/SOURCES.txt
RENAMED
@@ -5,6 +5,7 @@ tamar_model_client/async_client.py
 tamar_model_client/auth.py
 tamar_model_client/exceptions.py
 tamar_model_client/sync_client.py
+tamar_model_client/utils.py
 tamar_model_client.egg-info/PKG-INFO
 tamar_model_client.egg-info/SOURCES.txt
 tamar_model_client.egg-info/dependency_links.txt
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/README.md
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/setup.cfg
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/__init__.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/auth.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/enums/__init__.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/enums/channel.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/enums/providers.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/exceptions.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/generated/__init__.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/generated/model_service_pb2.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/generated/model_service_pb2_grpc.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/schemas/__init__.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client/schemas/outputs.py
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/dependency_links.txt
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/requires.txt
RENAMED
File without changes
{tamar_model_client-0.1.16 → tamar_model_client-0.1.17}/tamar_model_client.egg-info/top_level.txt
RENAMED
File without changes