tamar-model-client 0.2.3__py3-none-any.whl → 0.2.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- tamar_model_client/schemas/inputs.py +53 -27
- {tamar_model_client-0.2.3.dist-info → tamar_model_client-0.2.4.dist-info}/METADATA +1 -1
- {tamar_model_client-0.2.3.dist-info → tamar_model_client-0.2.4.dist-info}/RECORD +5 -5
- {tamar_model_client-0.2.3.dist-info → tamar_model_client-0.2.4.dist-info}/WHEEL +0 -0
- {tamar_model_client-0.2.3.dist-info → tamar_model_client-0.2.4.dist-info}/top_level.txt +0 -0
tamar_model_client/schemas/inputs.py
@@ -11,8 +11,9 @@ from openai.types.chat import ChatCompletionMessageParam, ChatCompletionAudioPar
     ChatCompletionToolParam
 from openai.types.responses import ResponseInputParam, ResponseIncludable, ResponseTextConfigParam, \
     response_create_params, ToolParam
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from pydantic import BaseModel, model_validator, field_validator
-from typing import List, Optional, Union, Iterable, Dict, Literal
+from typing import List, Optional, Union, Iterable, Dict, Literal

 from tamar_model_client.enums import ProviderType, InvokeType
 from tamar_model_client.enums.channel import Channel
@@ -59,24 +60,33 @@ class GoogleVertexAIImagesInput(BaseModel):


 class OpenAIResponsesInput(BaseModel):
-    input: Union[str, ResponseInputParam]
-    model: ResponsesModel
+    background: Optional[bool] | NotGiven = NOT_GIVEN
     include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN
+    input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN
     instructions: Optional[str] | NotGiven = NOT_GIVEN
     max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN
+    max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN
     metadata: Optional[Metadata] | NotGiven = NOT_GIVEN
+    model: ResponsesModel | NotGiven = NOT_GIVEN
     parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN
     previous_response_id: Optional[str] | NotGiven = NOT_GIVEN
+    prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN
+    prompt_cache_key: str | NotGiven = NOT_GIVEN
     reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN
+    safety_identifier: str | NotGiven = NOT_GIVEN
+    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN
     store: Optional[bool] | NotGiven = NOT_GIVEN
     stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
+    stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN
     temperature: Optional[float] | NotGiven = NOT_GIVEN
     text: ResponseTextConfigParam | NotGiven = NOT_GIVEN
     tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN
     tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN
+    top_logprobs: Optional[int] | NotGiven = NOT_GIVEN
     top_p: Optional[float] | NotGiven = NOT_GIVEN
     truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN
     user: str | NotGiven = NOT_GIVEN
+    verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN
     extra_headers: Headers | None = None
     extra_query: Query | None = None
     extra_body: Body | None = None
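Every field added to OpenAIResponsesInput defaults to NOT_GIVEN, so existing callers keep working and the new parameters are opt-in. A minimal construction sketch, assuming tamar-model-client 0.2.4 and the openai SDK are installed; the model name and values are placeholders:

from tamar_model_client.schemas.inputs import OpenAIResponsesInput

# Only the fields set here end up in the payload; everything else stays NOT_GIVEN.
req = OpenAIResponsesInput(
    model="gpt-4.1",                      # placeholder model name
    input="Summarize the 0.2.4 schema changes",
    service_tier="flex",                  # "flex", "scale", "priority" are newly accepted
    prompt_cache_key="inputs-diff-demo",  # new in 0.2.4
    verbosity="low",                      # new in 0.2.4
)
print(req.model_dump(exclude_defaults=True))

Note that model itself is now NotGiven-able, so the schema no longer forces callers to set it.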
@@ -104,10 +114,12 @@ class OpenAIChatCompletionsInput(BaseModel):
     parallel_tool_calls: bool | NotGiven = NOT_GIVEN
     prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN
     presence_penalty: Optional[float] | NotGiven = NOT_GIVEN
+    prompt_cache_key: str | NotGiven = NOT_GIVEN
     reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN
     response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN
+    safety_identifier: str | NotGiven = NOT_GIVEN
     seed: Optional[int] | NotGiven = NOT_GIVEN
-    service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN
+    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN
     stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN
     store: Optional[bool] | NotGiven = NOT_GIVEN
     stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
@@ -118,6 +130,7 @@ class OpenAIChatCompletionsInput(BaseModel):
     top_logprobs: Optional[int] | NotGiven = NOT_GIVEN
     top_p: Optional[float] | NotGiven = NOT_GIVEN
     user: str | NotGiven = NOT_GIVEN
+    verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN
     web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN
     extra_headers: Headers | None = None
     extra_query: Query | None = None
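OpenAIChatCompletionsInput gains the same prompt_cache_key, safety_identifier, and verbosity fields, and its service_tier literal widens from ("auto", "default") to include "flex", "scale", and "priority". A quick introspection sketch against an installed 0.2.4, using Pydantic v2's model_fields:

from tamar_model_client.schemas.inputs import OpenAIChatCompletionsInput

# Inspect the regenerated schema instead of constructing a full request.
fields = OpenAIChatCompletionsInput.model_fields
print(fields["service_tier"].annotation)       # literal now lists "flex", "scale", "priority"
for name in ("prompt_cache_key", "safety_identifier", "verbosity"):
    print(name, name in fields)                # expected: True on 0.2.4, False on 0.2.3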
@@ -166,7 +179,7 @@ class OpenAIImagesEditInput(BaseModel):
     output_compression: Optional[int] | NotGiven = NOT_GIVEN
     output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN
     partial_images: Optional[int] | NotGiven = NOT_GIVEN
-    quality: Optional[Literal["standard", "
+    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN
     response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN
     size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | NotGiven = NOT_GIVEN
     stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
@@ -191,33 +204,44 @@ class ModelRequestInput(BaseRequest):
     # Merged model field
     model: Optional[Union[str, ResponsesModel, ChatModel, ImageModel]] = None

-    # OpenAI Responses Input
+    # OpenAI Responses Input (merged)
     input: Optional[Union[str, ResponseInputParam]] = None
     include: Optional[Union[List[ResponseIncludable], NotGiven]] = NOT_GIVEN
     instructions: Optional[Union[str, NotGiven]] = NOT_GIVEN
     max_output_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    max_tool_calls: Optional[Union[int, NotGiven]] = NOT_GIVEN
     metadata: Optional[Union[Metadata, NotGiven]] = NOT_GIVEN
     parallel_tool_calls: Optional[Union[bool, NotGiven]] = NOT_GIVEN
     previous_response_id: Optional[Union[str, NotGiven]] = NOT_GIVEN
+    # Same-named prompt field merged: ResponsePromptParam from Responses + str from image generation
+    prompt: Optional[Union[str, ResponsePromptParam, NotGiven]] = NOT_GIVEN
+    prompt_cache_key: Optional[Union[str, NotGiven]] = NOT_GIVEN
     reasoning: Optional[Union[Reasoning, NotGiven]] = NOT_GIVEN
+    safety_identifier: Optional[Union[str, NotGiven]] = NOT_GIVEN
+    service_tier: Optional[Union[Literal["auto", "default", "flex", "scale", "priority"], NotGiven]] = NOT_GIVEN
     store: Optional[Union[bool, NotGiven]] = NOT_GIVEN
     stream: Optional[Union[Literal[False], Literal[True], NotGiven]] = NOT_GIVEN
+    # Merge of the two stream_options types
+    stream_options: Optional[
+        Union[response_create_params.StreamOptions, ChatCompletionStreamOptionsParam, NotGiven]] = NOT_GIVEN
     temperature: Optional[Union[float, NotGiven]] = NOT_GIVEN
     text: Optional[Union[ResponseTextConfigParam, NotGiven]] = NOT_GIVEN
     tool_choice: Optional[
         Union[response_create_params.ToolChoice, ChatCompletionToolChoiceOptionParam, NotGiven]
     ] = NOT_GIVEN
     tools: Optional[Union[Iterable[ToolParam], Iterable[ChatCompletionToolParam], NotGiven]] = NOT_GIVEN
+    top_logprobs: Optional[Union[int, NotGiven]] = NOT_GIVEN
     top_p: Optional[Union[float, NotGiven]] = NOT_GIVEN
     truncation: Optional[Union[Literal["auto", "disabled"], NotGiven]] = NOT_GIVEN
     user: Optional[Union[str, NotGiven]] = NOT_GIVEN
+    verbosity: Optional[Union[Literal["low", "medium", "high"], NotGiven]] = NOT_GIVEN

     extra_headers: Optional[Union[Headers, None]] = None
     extra_query: Optional[Union[Query, None]] = None
     extra_body: Optional[Union[Body, None]] = None
     timeout: Optional[Union[float, httpx.Timeout, None, NotGiven]] = NOT_GIVEN

-    # OpenAI Chat Completions Input
+    # OpenAI Chat Completions Input (merged)
     messages: Optional[Iterable[ChatCompletionMessageParam]] = None
     audio: Optional[Union[ChatCompletionAudioParam, NotGiven]] = NOT_GIVEN
     frequency_penalty: Optional[Union[float, NotGiven]] = NOT_GIVEN
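After the merge, a single ModelRequestInput field name carries the union of the provider-specific types: prompt accepts both the Responses ResponsePromptParam and the plain string used for image generation, and stream_options accepts either stream-options shape. A short sketch that prints the merged annotations, assuming tamar-model-client 0.2.4 is installed:

from tamar_model_client.schemas.inputs import ModelRequestInput

# Each merged field exposes one annotation covering all provider-specific shapes.
for name in ("prompt", "stream_options", "service_tier", "verbosity"):
    print(f"{name}: {ModelRequestInput.model_fields[name].annotation}")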
@@ -226,34 +250,32 @@ class ModelRequestInput(BaseRequest):
     logit_bias: Optional[Union[Dict[str, int], NotGiven]] = NOT_GIVEN
     logprobs: Optional[Union[bool, NotGiven]] = NOT_GIVEN
     max_completion_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    max_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
     modalities: Optional[Union[List[Literal["text", "audio"]], NotGiven]] = NOT_GIVEN
-    n: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    n: Optional[Union[int, NotGiven]] = NOT_GIVEN  # Reused for Chat and Images
     prediction: Optional[Union[ChatCompletionPredictionContentParam, NotGiven]] = NOT_GIVEN
     presence_penalty: Optional[Union[float, NotGiven]] = NOT_GIVEN
     reasoning_effort: Optional[Union[ReasoningEffort, NotGiven]] = NOT_GIVEN
     response_format: Optional[
-        Union[Literal["url", "b64_json"],
-
-
-    stop: Optional[Union[Optional[str], List[str], None, NotGiven]] = NOT_GIVEN
-    top_logprobs: Optional[Union[int, NotGiven]] = NOT_GIVEN
+        Union[completion_create_params.ResponseFormat, Literal["url", "b64_json"], NotGiven]
+    ] = NOT_GIVEN
+    seed: Optional[Union[int, NotGiven]] = NOT_GIVEN  # Shared by Chat / Vertex Images
     web_search_options: Optional[Union[completion_create_params.WebSearchOptions, NotGiven]] = NOT_GIVEN
-    stream_options: Optional[Union[ChatCompletionStreamOptionsParam, NotGiven]] = NOT_GIVEN

     # Google GenAI Input
     contents: Optional[Union[types.ContentListUnion, types.ContentListUnionDict]] = None
     config: Optional[types.GenerateContentConfigOrDict] = None

-    #
+    # Images (merge of OpenAI Images / Images Edit / Google Vertex Images)
     image: Optional[Union[FileTypes, List[FileTypes]]] = None
-
-    background: Optional[Literal["transparent", "opaque", "auto"]
-    moderation: Optional[Literal["low", "auto"]
-    input_fidelity: Optional[Literal["high", "low"]
-    output_compression: Optional[int
-    output_format: Optional[Literal["png", "jpeg", "webp"]
-    partial_images: Optional[int
-    mask: FileTypes
+    # Same-named background field merged: bool from Responses (background tasks) + transparency enum from Images
+    background: Optional[Union[bool, Literal["transparent", "opaque", "auto"], NotGiven]] = NOT_GIVEN
+    moderation: Optional[Union[Literal["low", "auto"], NotGiven]] = NOT_GIVEN
+    input_fidelity: Optional[Union[Literal["high", "low"], NotGiven]] = NOT_GIVEN
+    output_compression: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    output_format: Optional[Union[Literal["png", "jpeg", "webp"], NotGiven]] = NOT_GIVEN
+    partial_images: Optional[Union[int, NotGiven]] = NOT_GIVEN
+    mask: Union[FileTypes, NotGiven] = NOT_GIVEN
     negative_prompt: Optional[str] = None
     aspect_ratio: Optional[Literal["1:1", "9:16", "16:9", "4:3", "3:4"]] = None
     guidance_scale: Optional[float] = None
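The formerly Images-only fields move into ModelRequestInput with NOT_GIVEN defaults, and background now serves two meanings at once: the Responses boolean (run the request as a background task) and the Images transparency enum. A self-contained mirror of that merged-union pattern; the demo class below is hypothetical and not part of the package:

from typing import Literal, Optional, Union

from pydantic import BaseModel


class MergedBackgroundDemo(BaseModel):
    # bool mirrors the Responses "background task" flag;
    # the Literal mirrors the Images transparency option.
    background: Optional[Union[bool, Literal["transparent", "opaque", "auto"]]] = None


print(MergedBackgroundDemo(background=True).background)           # Responses-style value
print(MergedBackgroundDemo(background="transparent").background)  # Images-style value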
@@ -262,10 +284,14 @@ class ModelRequestInput(BaseRequest):
     add_watermark: Optional[bool] = None
     safety_filter_level: Optional[Literal["block_most", "block_some", "block_few", "block_fewest"]] = None
     person_generation: Optional[Literal["dont_allow", "allow_adult", "allow_all"]] = None
-    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]
-    size: Optional[
-
-
+    quality: Optional[Union[Literal["standard", "hd", "low", "medium", "high", "auto"], NotGiven]] = NOT_GIVEN
+    size: Optional[
+        Union[
+            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"],
+            NotGiven,
+        ]
+    ] = NOT_GIVEN
+    style: Optional[Union[Literal["vivid", "natural"], NotGiven]] = NOT_GIVEN
     number_of_images: Optional[int] = None  # Google usage

     model_config = {
{tamar_model_client-0.2.3.dist-info → tamar_model_client-0.2.4.dist-info}/RECORD
@@ -24,7 +24,7 @@ tamar_model_client/generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
 tamar_model_client/generated/model_service_pb2.py,sha256=RI6wNSmgmylzWPedFfPxx938UzS7kcPR58YTzYshcL8,3066
 tamar_model_client/generated/model_service_pb2_grpc.py,sha256=k4tIbp3XBxdyuOVR18Ung_4SUryONB51UYf_uUEl6V4,5145
 tamar_model_client/schemas/__init__.py,sha256=AxuI-TcvA4OMTj2FtK4wAItvz9LrK_293pu3cmMLE7k,394
-tamar_model_client/schemas/inputs.py,sha256=
+tamar_model_client/schemas/inputs.py,sha256=ilSZxcnXubX-yndz6X7y_mZnx19f5IGmWoKPSm0L_R8,17069
 tamar_model_client/schemas/outputs.py,sha256=M_fcqUtXPJnfiLabHlyA8BorlC5pYkf5KLjXO1ysKIQ,1031
 tests/__init__.py,sha256=kbmImddLDwdqlkkmkyKtl4bQy_ipe-R8eskpaBylU9w,38
 tests/stream_hanging_analysis.py,sha256=W3W48IhQbNAR6-xvMpoWZvnWOnr56CTaH4-aORNBuD4,14807
@@ -32,7 +32,7 @@ tests/test_circuit_breaker.py,sha256=nhEBnyXFjIYjRWlUdu7Z9PnPq48ypbBK6fxN6deHedw
 tests/test_google_azure_final.py,sha256=Cx2lfnoj48_7pUjpCYbrx6OLJF4cI79McV24_EYt_8s,55093
 tests/test_logging_issue.py,sha256=JTMbotfHpAEPMBj73pOwxPn-Zn4QVQJX6scMz48FRDQ,2427
 tests/test_simple.py,sha256=Xf0U-J9_xn_LzUsmYu06suK0_7DrPeko8OHoHldsNxE,7169
-tamar_model_client-0.2.
-tamar_model_client-0.2.
-tamar_model_client-0.2.
-tamar_model_client-0.2.
+tamar_model_client-0.2.4.dist-info/METADATA,sha256=YN-OEy64kB_c1f9sTYRxciIdbn5KSAIeRXECpR7xKLU,41309
+tamar_model_client-0.2.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tamar_model_client-0.2.4.dist-info/top_level.txt,sha256=f1I-S8iWN-cgv4gB8gxRg9jJOTJMumvm4oGKVPfGg6A,25
+tamar_model_client-0.2.4.dist-info/RECORD,,
{tamar_model_client-0.2.3.dist-info → tamar_model_client-0.2.4.dist-info}/WHEEL: file without changes
{tamar_model_client-0.2.3.dist-info → tamar_model_client-0.2.4.dist-info}/top_level.txt: file without changes