tamar-model-client 0.2.3.tar.gz → 0.2.4.tar.gz

This diff shows the content of publicly available package versions as released to a public registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (40)
  1. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/PKG-INFO +1 -1
  2. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/setup.py +1 -1
  3. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/schemas/inputs.py +53 -27
  4. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client.egg-info/PKG-INFO +1 -1
  5. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/README.md +0 -0
  6. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/setup.cfg +0 -0
  7. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/__init__.py +0 -0
  8. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/async_client.py +0 -0
  9. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/auth.py +0 -0
  10. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/circuit_breaker.py +0 -0
  11. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/__init__.py +0 -0
  12. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/base_client.py +0 -0
  13. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/http_fallback.py +0 -0
  14. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/logging_setup.py +0 -0
  15. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/request_builder.py +0 -0
  16. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/request_id_manager.py +0 -0
  17. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/response_handler.py +0 -0
  18. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/core/utils.py +0 -0
  19. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/enums/__init__.py +0 -0
  20. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/enums/channel.py +0 -0
  21. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/enums/invoke.py +0 -0
  22. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/enums/providers.py +0 -0
  23. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/error_handler.py +0 -0
  24. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/exceptions.py +0 -0
  25. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/generated/__init__.py +0 -0
  26. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/generated/model_service_pb2.py +0 -0
  27. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/generated/model_service_pb2_grpc.py +0 -0
  28. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/json_formatter.py +0 -0
  29. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/logging_icons.py +0 -0
  30. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/schemas/__init__.py +0 -0
  31. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/schemas/outputs.py +0 -0
  32. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/sync_client.py +0 -0
  33. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client/utils.py +0 -0
  34. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client.egg-info/SOURCES.txt +0 -0
  35. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client.egg-info/dependency_links.txt +0 -0
  36. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client.egg-info/requires.txt +0 -0
  37. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tamar_model_client.egg-info/top_level.txt +0 -0
  38. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tests/__init__.py +0 -0
  39. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tests/test_circuit_breaker.py +0 -0
  40. {tamar_model_client-0.2.3 → tamar_model_client-0.2.4}/tests/test_google_azure_final.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tamar-model-client
- Version: 0.2.3
+ Version: 0.2.4
  Summary: A Python SDK for interacting with the Model Manager gRPC service
  Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
  Author: Oscar Ou
setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
  setup(
      name="tamar-model-client",
-     version="0.2.3",
+     version="0.2.4",
      description="A Python SDK for interacting with the Model Manager gRPC service",
      author="Oscar Ou",
      author_email="oscar.ou@tamaredge.ai",
tamar_model_client/schemas/inputs.py
@@ -11,8 +11,9 @@ from openai.types.chat import ChatCompletionMessageParam, ChatCompletionAudioPar
      ChatCompletionToolParam
  from openai.types.responses import ResponseInputParam, ResponseIncludable, ResponseTextConfigParam, \
      response_create_params, ToolParam
+ from openai.types.responses.response_prompt_param import ResponsePromptParam
  from pydantic import BaseModel, model_validator, field_validator
- from typing import List, Optional, Union, Iterable, Dict, Literal, IO
+ from typing import List, Optional, Union, Iterable, Dict, Literal
 
  from tamar_model_client.enums import ProviderType, InvokeType
  from tamar_model_client.enums.channel import Channel
@@ -59,24 +60,33 @@ class GoogleVertexAIImagesInput(BaseModel):
 
 
  class OpenAIResponsesInput(BaseModel):
-     input: Union[str, ResponseInputParam]
-     model: ResponsesModel
+     background: Optional[bool] | NotGiven = NOT_GIVEN
      include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN
+     input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN
      instructions: Optional[str] | NotGiven = NOT_GIVEN
      max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN
+     max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN
      metadata: Optional[Metadata] | NotGiven = NOT_GIVEN
+     model: ResponsesModel | NotGiven = NOT_GIVEN
      parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN
      previous_response_id: Optional[str] | NotGiven = NOT_GIVEN
+     prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN
+     prompt_cache_key: str | NotGiven = NOT_GIVEN
      reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN
+     safety_identifier: str | NotGiven = NOT_GIVEN
+     service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN
      store: Optional[bool] | NotGiven = NOT_GIVEN
      stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
+     stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN
      temperature: Optional[float] | NotGiven = NOT_GIVEN
      text: ResponseTextConfigParam | NotGiven = NOT_GIVEN
      tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN
      tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN
+     top_logprobs: Optional[int] | NotGiven = NOT_GIVEN
      top_p: Optional[float] | NotGiven = NOT_GIVEN
      truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN
      user: str | NotGiven = NOT_GIVEN
+     verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN
      extra_headers: Headers | None = None
      extra_query: Query | None = None
      extra_body: Body | None = None
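
Every field of OpenAIResponsesInput now defaults to NOT_GIVEN, including input and model, which 0.2.3 treated as required. The sketch below shows the sentinel pattern these schemas rely on; the two-field model and the to_kwargs() helper are hypothetical, not part of this SDK. NOT_GIVEN distinguishes "never supplied" from an explicit None, and unset fields are stripped before the kwargs reach the OpenAI client.

# Sketch of the NotGiven-sentinel pattern: NOT_GIVEN means "not supplied",
# which keeps None available as a real value.
from openai import NOT_GIVEN, NotGiven
from pydantic import BaseModel


class ResponsesParams(BaseModel):  # hypothetical stand-in for OpenAIResponsesInput
    input: str | NotGiven = NOT_GIVEN
    temperature: float | None | NotGiven = NOT_GIVEN

    model_config = {"arbitrary_types_allowed": True}  # NotGiven is not a pydantic-native type

    def to_kwargs(self) -> dict:
        # drop everything the caller never set
        return {k: v for k, v in self.__dict__.items() if not isinstance(v, NotGiven)}


assert ResponsesParams(input="hello").to_kwargs() == {"input": "hello"}
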
@@ -104,10 +114,12 @@ class OpenAIChatCompletionsInput(BaseModel):
      parallel_tool_calls: bool | NotGiven = NOT_GIVEN
      prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN
      presence_penalty: Optional[float] | NotGiven = NOT_GIVEN
+     prompt_cache_key: str | NotGiven = NOT_GIVEN
      reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN
      response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN
+     safety_identifier: str | NotGiven = NOT_GIVEN
      seed: Optional[int] | NotGiven = NOT_GIVEN
-     service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN
+     service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN
      stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN
      store: Optional[bool] | NotGiven = NOT_GIVEN
      stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
@@ -118,6 +130,7 @@ class OpenAIChatCompletionsInput(BaseModel):
      top_logprobs: Optional[int] | NotGiven = NOT_GIVEN
      top_p: Optional[float] | NotGiven = NOT_GIVEN
      user: str | NotGiven = NOT_GIVEN
+     verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN
      web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN
      extra_headers: Headers | None = None
      extra_query: Query | None = None
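
The new prompt_cache_key, safety_identifier, and verbosity fields mirror parameters added in recent openai-python releases, and service_tier picks up the newer "flex", "scale", and "priority" tiers. A hedged sketch of passing them through to the underlying client; the model name and values are illustrative, and these kwargs require a sufficiently new openai version.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Illustrative values; these kwargs exist in recent openai-python versions.
resp = client.chat.completions.create(
    model="gpt-5",                      # placeholder model name
    messages=[{"role": "user", "content": "ping"}],
    service_tier="flex",                # newly allowed literal in this schema
    prompt_cache_key="checkout-flow",   # groups requests for prompt caching
    safety_identifier="user-1234",      # stable per-end-user identifier
    verbosity="low",                    # hint for shorter answers
)
print(resp.choices[0].message.content)
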
@@ -166,7 +179,7 @@ class OpenAIImagesEditInput(BaseModel):
      output_compression: Optional[int] | NotGiven = NOT_GIVEN
      output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN
      partial_images: Optional[int] | NotGiven = NOT_GIVEN
-     quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN
+     quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN
      response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN
      size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | NotGiven = NOT_GIVEN
      stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN
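
The edit schema narrows quality by dropping "hd", which makes sense given that "hd" is a DALL-E 3 generation-only setting and the edits endpoint does not serve DALL-E 3; the merged ModelRequestInput below still allows it for generation. A quick check that the narrowed Literal rejects it, using a hypothetical one-field stand-in:

from typing import Literal, Optional
from pydantic import BaseModel, ValidationError


class EditQuality(BaseModel):  # hypothetical stand-in for OpenAIImagesEditInput
    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] = None


EditQuality(quality="high")  # accepted
try:
    EditQuality(quality="hd")
except ValidationError:
    print("'hd' rejected, as the 0.2.4 schema intends")
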
@@ -191,33 +204,44 @@ class ModelRequestInput(BaseRequest):
      # merged model field
      model: Optional[Union[str, ResponsesModel, ChatModel, ImageModel]] = None
 
-     # OpenAI Responses Input
+     # OpenAI Responses Input (merged)
      input: Optional[Union[str, ResponseInputParam]] = None
      include: Optional[Union[List[ResponseIncludable], NotGiven]] = NOT_GIVEN
      instructions: Optional[Union[str, NotGiven]] = NOT_GIVEN
      max_output_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
+     max_tool_calls: Optional[Union[int, NotGiven]] = NOT_GIVEN
      metadata: Optional[Union[Metadata, NotGiven]] = NOT_GIVEN
      parallel_tool_calls: Optional[Union[bool, NotGiven]] = NOT_GIVEN
      previous_response_id: Optional[Union[str, NotGiven]] = NOT_GIVEN
+     # same-named prompt field merged: ResponsePromptParam from Responses + str from image generation
+     prompt: Optional[Union[str, ResponsePromptParam, NotGiven]] = NOT_GIVEN
+     prompt_cache_key: Optional[Union[str, NotGiven]] = NOT_GIVEN
      reasoning: Optional[Union[Reasoning, NotGiven]] = NOT_GIVEN
+     safety_identifier: Optional[Union[str, NotGiven]] = NOT_GIVEN
+     service_tier: Optional[Union[Literal["auto", "default", "flex", "scale", "priority"], NotGiven]] = NOT_GIVEN
      store: Optional[Union[bool, NotGiven]] = NOT_GIVEN
      stream: Optional[Union[Literal[False], Literal[True], NotGiven]] = NOT_GIVEN
+     # merges the two stream_options types
+     stream_options: Optional[
+         Union[response_create_params.StreamOptions, ChatCompletionStreamOptionsParam, NotGiven]] = NOT_GIVEN
      temperature: Optional[Union[float, NotGiven]] = NOT_GIVEN
      text: Optional[Union[ResponseTextConfigParam, NotGiven]] = NOT_GIVEN
      tool_choice: Optional[
          Union[response_create_params.ToolChoice, ChatCompletionToolChoiceOptionParam, NotGiven]
      ] = NOT_GIVEN
      tools: Optional[Union[Iterable[ToolParam], Iterable[ChatCompletionToolParam], NotGiven]] = NOT_GIVEN
+     top_logprobs: Optional[Union[int, NotGiven]] = NOT_GIVEN
      top_p: Optional[Union[float, NotGiven]] = NOT_GIVEN
      truncation: Optional[Union[Literal["auto", "disabled"], NotGiven]] = NOT_GIVEN
      user: Optional[Union[str, NotGiven]] = NOT_GIVEN
+     verbosity: Optional[Union[Literal["low", "medium", "high"], NotGiven]] = NOT_GIVEN
 
      extra_headers: Optional[Union[Headers, None]] = None
      extra_query: Optional[Union[Query, None]] = None
      extra_body: Optional[Union[Body, None]] = None
      timeout: Optional[Union[float, httpx.Timeout, None, NotGiven]] = NOT_GIVEN
 
-     # OpenAI Chat Completions Input
+     # OpenAI Chat Completions Input (merged)
      messages: Optional[Iterable[ChatCompletionMessageParam]] = None
      audio: Optional[Union[ChatCompletionAudioParam, NotGiven]] = NOT_GIVEN
      frequency_penalty: Optional[Union[float, NotGiven]] = NOT_GIVEN
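
ModelRequestInput keeps one field name per merged parameter even when the APIs disagree on its type: prompt is either a ResponsePromptParam (Responses) or a plain str (Images), and background is either a bool (a Responses background task) or a transparency literal (Images). Downstream code therefore has to branch on the runtime type. An illustrative sketch with a hypothetical mini-model, not the SDK's actual dispatch logic:

from typing import Literal, Union
from pydantic import BaseModel


class MergedRequest(BaseModel):  # hypothetical mini ModelRequestInput
    # Responses: bool (run as background task); Images: transparency setting
    background: Union[bool, Literal["transparent", "opaque", "auto"], None] = None


def background_kind(req: MergedRequest) -> str:
    # isinstance check first, since True/False would otherwise be ambiguous
    if isinstance(req.background, bool):
        return "responses"  # background-task flag
    if req.background is not None:
        return "images"     # transparency enum
    return "unset"


assert background_kind(MergedRequest(background=True)) == "responses"
assert background_kind(MergedRequest(background="opaque")) == "images"
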
@@ -226,34 +250,32 @@ class ModelRequestInput(BaseRequest):
      logit_bias: Optional[Union[Dict[str, int], NotGiven]] = NOT_GIVEN
      logprobs: Optional[Union[bool, NotGiven]] = NOT_GIVEN
      max_completion_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
+     max_tokens: Optional[Union[int, NotGiven]] = NOT_GIVEN
      modalities: Optional[Union[List[Literal["text", "audio"]], NotGiven]] = NOT_GIVEN
-     n: Optional[Union[int, NotGiven]] = NOT_GIVEN
+     n: Optional[Union[int, NotGiven]] = NOT_GIVEN  # reused by Chat and Images
      prediction: Optional[Union[ChatCompletionPredictionContentParam, NotGiven]] = NOT_GIVEN
      presence_penalty: Optional[Union[float, NotGiven]] = NOT_GIVEN
      reasoning_effort: Optional[Union[ReasoningEffort, NotGiven]] = NOT_GIVEN
      response_format: Optional[
-         Union[Literal["url", "b64_json"], completion_create_params.ResponseFormat, NotGiven]] = NOT_GIVEN
-     seed: Optional[Union[int, NotGiven]] = NOT_GIVEN
-     service_tier: Optional[Union[Literal["auto", "default"], NotGiven]] = NOT_GIVEN
-     stop: Optional[Union[Optional[str], List[str], None, NotGiven]] = NOT_GIVEN
-     top_logprobs: Optional[Union[int, NotGiven]] = NOT_GIVEN
+         Union[completion_create_params.ResponseFormat, Literal["url", "b64_json"], NotGiven]
+     ] = NOT_GIVEN
+     seed: Optional[Union[int, NotGiven]] = NOT_GIVEN  # shared by Chat and Vertex Images
      web_search_options: Optional[Union[completion_create_params.WebSearchOptions, NotGiven]] = NOT_GIVEN
-     stream_options: Optional[Union[ChatCompletionStreamOptionsParam, NotGiven]] = NOT_GIVEN
 
      # Google GenAI Input
      contents: Optional[Union[types.ContentListUnion, types.ContentListUnionDict]] = None
      config: Optional[types.GenerateContentConfigOrDict] = None
 
-     # merged fields from OpenAIImagesInput + OpenAIImagesEditInput + GoogleVertexAIImagesInput
+     # Images (OpenAI Images / Images Edit / Google Vertex Images merged)
      image: Optional[Union[FileTypes, List[FileTypes]]] = None
-     prompt: Optional[str] = None
-     background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN
-     moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN
-     input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN
-     output_compression: Optional[int] | NotGiven = NOT_GIVEN
-     output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN
-     partial_images: Optional[int] | NotGiven = NOT_GIVEN
-     mask: FileTypes | NotGiven = NOT_GIVEN
+     # same-named background field merged: bool from Responses (background task) + transparency enum from Images
+     background: Optional[Union[bool, Literal["transparent", "opaque", "auto"], NotGiven]] = NOT_GIVEN
+     moderation: Optional[Union[Literal["low", "auto"], NotGiven]] = NOT_GIVEN
+     input_fidelity: Optional[Union[Literal["high", "low"], NotGiven]] = NOT_GIVEN
+     output_compression: Optional[Union[int, NotGiven]] = NOT_GIVEN
+     output_format: Optional[Union[Literal["png", "jpeg", "webp"], NotGiven]] = NOT_GIVEN
+     partial_images: Optional[Union[int, NotGiven]] = NOT_GIVEN
+     mask: Union[FileTypes, NotGiven] = NOT_GIVEN
      negative_prompt: Optional[str] = None
      aspect_ratio: Optional[Literal["1:1", "9:16", "16:9", "4:3", "3:4"]] = None
      guidance_scale: Optional[float] = None
@@ -262,10 +284,14 @@ class ModelRequestInput(BaseRequest):
      add_watermark: Optional[bool] = None
      safety_filter_level: Optional[Literal["block_most", "block_some", "block_few", "block_fewest"]] = None
      person_generation: Optional[Literal["dont_allow", "allow_adult", "allow_all"]] = None
-     quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN
-     size: Optional[Literal[
-         "auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN
-     style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN
+     quality: Optional[Union[Literal["standard", "hd", "low", "medium", "high", "auto"], NotGiven]] = NOT_GIVEN
+     size: Optional[
+         Union[
+             Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"],
+             NotGiven,
+         ]
+     ] = NOT_GIVEN
+     style: Optional[Union[Literal["vivid", "natural"], NotGiven]] = NOT_GIVEN
      number_of_images: Optional[int] = None  # Google usage
 
      model_config = {
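
The rest of this hunk is stylistic churn: annotations written as Optional[X] | NotGiven become Optional[Union[X, NotGiven]]. Both spell the same three-member union, as a quick check shows (assuming Python 3.10+ and the openai SDK's NotGiven):

from typing import Literal, Optional, Union, get_args

from openai import NotGiven

OldStyle = Optional[Literal["vivid", "natural"]] | NotGiven
NewStyle = Optional[Union[Literal["vivid", "natural"], NotGiven]]

# Both flatten to Literal["vivid", "natural"] | None | NotGiven.
assert set(get_args(OldStyle)) == set(get_args(NewStyle))
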
tamar_model_client.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tamar-model-client
- Version: 0.2.3
+ Version: 0.2.4
  Summary: A Python SDK for interacting with the Model Manager gRPC service
  Home-page: http://gitlab.tamaredge.top/project-tap/AgentOS/model-manager-client
  Author: Oscar Ou