google-genai 1.30.0__py3-none-any.whl → 1.32.0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- google/genai/_api_client.py +32 -32
- google/genai/_automatic_function_calling_util.py +12 -0
- google/genai/_base_transformers.py +26 -0
- google/genai/_live_converters.py +1 -0
- google/genai/_local_tokenizer_loader.py +223 -0
- google/genai/_operations_converters.py +307 -0
- google/genai/_tokens_converters.py +1 -0
- google/genai/_transformers.py +0 -10
- google/genai/batches.py +141 -0
- google/genai/caches.py +15 -2
- google/genai/files.py +11 -2
- google/genai/local_tokenizer.py +362 -0
- google/genai/models.py +518 -17
- google/genai/operations.py +1 -0
- google/genai/tunings.py +135 -0
- google/genai/types.py +781 -323
- google/genai/version.py +1 -1
- {google_genai-1.30.0.dist-info → google_genai-1.32.0.dist-info}/METADATA +6 -6
- google_genai-1.32.0.dist-info/RECORD +39 -0
- google_genai-1.30.0.dist-info/RECORD +0 -35
- {google_genai-1.30.0.dist-info → google_genai-1.32.0.dist-info}/WHEEL +0 -0
- {google_genai-1.30.0.dist-info → google_genai-1.32.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.30.0.dist-info → google_genai-1.32.0.dist-info}/top_level.txt +0 -0
google/genai/_operations_converters.py
ADDED
@@ -0,0 +1,307 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Code generated by the Google Gen AI SDK generator DO NOT EDIT.
+
+from typing import Any, Optional, Union
+from . import _base_transformers as base_t
+from ._common import get_value_by_path as getv
+from ._common import set_value_by_path as setv
+
+
+def _FetchPredictOperationParameters_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['operation_name']) is not None:
+    raise ValueError('operation_name parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['resource_name']) is not None:
+    raise ValueError('resource_name parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['config']) is not None:
+    raise ValueError('config parameter is not supported in Gemini API.')
+
+  return to_object
+
+
+def _GetOperationParameters_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['operation_name']) is not None:
+    setv(
+        to_object,
+        ['_url', 'operationName'],
+        getv(from_object, ['operation_name']),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(to_object, ['config'], getv(from_object, ['config']))
+
+  return to_object
+
+
+def _FetchPredictOperationParameters_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['operation_name']) is not None:
+    setv(to_object, ['operationName'], getv(from_object, ['operation_name']))
+
+  if getv(from_object, ['resource_name']) is not None:
+    setv(
+        to_object,
+        ['_url', 'resourceName'],
+        getv(from_object, ['resource_name']),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(to_object, ['config'], getv(from_object, ['config']))
+
+  return to_object
+
+
+def _GetOperationParameters_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['operation_name']) is not None:
+    setv(
+        to_object,
+        ['_url', 'operationName'],
+        getv(from_object, ['operation_name']),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(to_object, ['config'], getv(from_object, ['config']))
+
+  return to_object
+
+
+def _Video_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['video', 'uri']) is not None:
+    setv(to_object, ['uri'], getv(from_object, ['video', 'uri']))
+
+  if getv(from_object, ['video', 'encodedVideo']) is not None:
+    setv(
+        to_object,
+        ['video_bytes'],
+        base_t.t_bytes(getv(from_object, ['video', 'encodedVideo'])),
+    )
+
+  if getv(from_object, ['encoding']) is not None:
+    setv(to_object, ['mime_type'], getv(from_object, ['encoding']))
+
+  return to_object
+
+
+def _GeneratedVideo_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['_self']) is not None:
+    setv(
+        to_object,
+        ['video'],
+        _Video_from_mldev(getv(from_object, ['_self']), to_object),
+    )
+
+  return to_object
+
+
+def _GenerateVideosResponse_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['generatedSamples']) is not None:
+    setv(
+        to_object,
+        ['generated_videos'],
+        [
+            _GeneratedVideo_from_mldev(item, to_object)
+            for item in getv(from_object, ['generatedSamples'])
+        ],
+    )
+
+  if getv(from_object, ['raiMediaFilteredCount']) is not None:
+    setv(
+        to_object,
+        ['rai_media_filtered_count'],
+        getv(from_object, ['raiMediaFilteredCount']),
+    )
+
+  if getv(from_object, ['raiMediaFilteredReasons']) is not None:
+    setv(
+        to_object,
+        ['rai_media_filtered_reasons'],
+        getv(from_object, ['raiMediaFilteredReasons']),
+    )
+
+  return to_object
+
+
+def _GenerateVideosOperation_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['metadata']) is not None:
+    setv(to_object, ['metadata'], getv(from_object, ['metadata']))
+
+  if getv(from_object, ['done']) is not None:
+    setv(to_object, ['done'], getv(from_object, ['done']))
+
+  if getv(from_object, ['error']) is not None:
+    setv(to_object, ['error'], getv(from_object, ['error']))
+
+  if getv(from_object, ['response', 'generateVideoResponse']) is not None:
+    setv(
+        to_object,
+        ['response'],
+        _GenerateVideosResponse_from_mldev(
+            getv(from_object, ['response', 'generateVideoResponse']), to_object
+        ),
+    )
+
+  if getv(from_object, ['response', 'generateVideoResponse']) is not None:
+    setv(
+        to_object,
+        ['result'],
+        _GenerateVideosResponse_from_mldev(
+            getv(from_object, ['response', 'generateVideoResponse']), to_object
+        ),
+    )
+
+  return to_object
+
+
+def _Video_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['gcsUri']) is not None:
+    setv(to_object, ['uri'], getv(from_object, ['gcsUri']))
+
+  if getv(from_object, ['bytesBase64Encoded']) is not None:
+    setv(
+        to_object,
+        ['video_bytes'],
+        base_t.t_bytes(getv(from_object, ['bytesBase64Encoded'])),
+    )
+
+  if getv(from_object, ['mimeType']) is not None:
+    setv(to_object, ['mime_type'], getv(from_object, ['mimeType']))
+
+  return to_object
+
+
+def _GeneratedVideo_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['_self']) is not None:
+    setv(
+        to_object,
+        ['video'],
+        _Video_from_vertex(getv(from_object, ['_self']), to_object),
+    )
+
+  return to_object
+
+
+def _GenerateVideosResponse_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['videos']) is not None:
+    setv(
+        to_object,
+        ['generated_videos'],
+        [
+            _GeneratedVideo_from_vertex(item, to_object)
+            for item in getv(from_object, ['videos'])
+        ],
+    )
+
+  if getv(from_object, ['raiMediaFilteredCount']) is not None:
+    setv(
+        to_object,
+        ['rai_media_filtered_count'],
+        getv(from_object, ['raiMediaFilteredCount']),
+    )
+
+  if getv(from_object, ['raiMediaFilteredReasons']) is not None:
+    setv(
+        to_object,
+        ['rai_media_filtered_reasons'],
+        getv(from_object, ['raiMediaFilteredReasons']),
+    )
+
+  return to_object
+
+
+def _GenerateVideosOperation_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['metadata']) is not None:
+    setv(to_object, ['metadata'], getv(from_object, ['metadata']))
+
+  if getv(from_object, ['done']) is not None:
+    setv(to_object, ['done'], getv(from_object, ['done']))
+
+  if getv(from_object, ['error']) is not None:
+    setv(to_object, ['error'], getv(from_object, ['error']))
+
+  if getv(from_object, ['response']) is not None:
+    setv(
+        to_object,
+        ['response'],
+        _GenerateVideosResponse_from_vertex(
+            getv(from_object, ['response']), to_object
+        ),
+    )
+
+  if getv(from_object, ['response']) is not None:
+    setv(
+        to_object,
+        ['result'],
+        _GenerateVideosResponse_from_vertex(
+            getv(from_object, ['response']), to_object
+        ),
+    )
+
+  return to_object
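The new google/genai/_operations_converters.py module above collects generated converters that translate long-running operation payloads between the SDK's snake_case field names and the two wire formats: the *_to_mldev/*_from_mldev functions for the Gemini API and the *_to_vertex/*_from_vertex functions for Vertex AI. A minimal sketch of the renaming idea follows, using plain dicts instead of the SDK's private getv/setv helpers; video_from_vertex is a hypothetical stand-in for _Video_from_vertex, and byte handling (done by base_t.t_bytes in the real module, whose source is not part of this diff) is left as a pass-through.

from typing import Any

def video_from_vertex(raw: dict[str, Any]) -> dict[str, Any]:
    # Mirror the key renaming visible in _Video_from_vertex above.
    out: dict[str, Any] = {}
    if raw.get('gcsUri') is not None:
        out['uri'] = raw['gcsUri']  # gcsUri -> uri
    if raw.get('bytesBase64Encoded') is not None:
        # The SDK routes this value through base_t.t_bytes; passed through here.
        out['video_bytes'] = raw['bytesBase64Encoded']
    if raw.get('mimeType') is not None:
        out['mime_type'] = raw['mimeType']  # mimeType -> mime_type
    return out

print(video_from_vertex({'gcsUri': 'gs://bucket/clip.mp4', 'mimeType': 'video/mp4'}))
# {'uri': 'gs://bucket/clip.mp4', 'mime_type': 'video/mp4'}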
google/genai/_transformers.py
CHANGED
@@ -1156,16 +1156,6 @@ def t_tuning_job_status(status: str) -> Union[types.JobState, str]:
   return status
 
 
-# Some fields don't accept url safe base64 encoding.
-# We shouldn't use this transformer if the backend adhere to Cloud Type
-# format https://cloud.google.com/docs/discovery/type-format.
-# TODO(b/389133914,b/390320301): Remove the hack after backend fix the issue.
-def t_bytes(data: bytes) -> str:
-  if not isinstance(data, bytes):
-    return data
-  return base64.b64encode(data).decode('ascii')
-
-
 def t_content_strict(content: types.ContentOrDict) -> types.Content:
   if isinstance(content, dict):
     return types.Content.model_validate(content)
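t_bytes is removed from _transformers.py in the same release that adds _base_transformers.py (see the file list above), and the new _operations_converters.py calls it as base_t.t_bytes. The relocated body is not shown in this diff, so the following standalone sketch simply restates the removed implementation for reference, with the annotations widened to cover the pass-through case:

import base64
from typing import Union

def t_bytes(data: Union[bytes, str]) -> Union[bytes, str]:
    # Removed helper: bytes are base64-encoded to an ASCII str because some
    # fields do not accept URL-safe base64; any other value passes through.
    if not isinstance(data, bytes):
        return data
    return base64.b64encode(data).decode('ascii')

assert t_bytes(b'\x00\x01') == 'AAE='
assert t_bytes('already-encoded') == 'already-encoded'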
google/genai/batches.py
CHANGED
@@ -30,6 +30,7 @@ from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+
 logger = logging.getLogger('google_genai.batches')
 
 
@@ -2257,6 +2258,17 @@ class Batches(_api_module.BaseModule):
       )
       print(batch_job.state)
     """
+    parameter_model = types._CreateBatchJobParameters(
+        model=model,
+        src=src,
+        config=config,
+    )
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
     if self._api_client.vertexai:
       if isinstance(src, list):
         raise ValueError(
@@ -2265,6 +2277,65 @@
         )
 
       config = _extra_utils.format_destination(src, config)
+    else:
+      if isinstance(parameter_model.src, list) or (
+          not isinstance(parameter_model.src, str)
+          and parameter_model.src
+          and parameter_model.src.inlined_requests
+      ):
+        # Handle system instruction in InlinedRequests.
+        request_url_dict: Optional[dict[str, str]]
+        request_dict: dict[str, Any] = _CreateBatchJobParameters_to_mldev(
+            self._api_client, parameter_model
+        )
+        request_url_dict = request_dict.get('_url')
+        if request_url_dict:
+          path = '{model}:batchGenerateContent'.format_map(request_url_dict)
+        else:
+          path = '{model}:batchGenerateContent'
+        query_params = request_dict.get('_query')
+        if query_params:
+          path = f'{path}?{urlencode(query_params)}'
+        request_dict.pop('config', None)
+
+        request_dict = _common.convert_to_dict(request_dict)
+        request_dict = _common.encode_unserializable_types(request_dict)
+        # Move system instruction to 'request':
+        # {'systemInstruction': system_instruction}
+        requests = []
+        batch_dict = request_dict.get('batch')
+        if batch_dict and isinstance(batch_dict, dict):
+          input_config_dict = batch_dict.get('inputConfig')
+          if input_config_dict and isinstance(input_config_dict, dict):
+            requests_dict = input_config_dict.get('requests')
+            if requests_dict and isinstance(requests_dict, dict):
+              requests = requests_dict.get('requests')
+        new_requests = []
+        if requests:
+          for req in requests:
+            if req.get('systemInstruction'):
+              value = req.pop('systemInstruction')
+              req['request'].update({'systemInstruction': value})
+            new_requests.append(req)
+          request_dict['batch']['inputConfig']['requests'][  # type: ignore
+              'requests'
+          ] = new_requests
+
+        response = self._api_client.request(
+            'post', path, request_dict, http_options
+        )
+
+        response_dict = '' if not response.body else json.loads(response.body)
+
+        response_dict = _BatchJob_from_mldev(response_dict)
+
+        return_value = types.BatchJob._from_response(
+            response=response_dict, kwargs=parameter_model.model_dump()
+        )
+
+        self._api_client._verify_response(return_value)
+        return return_value
+
     return self._create(model=model, src=src, config=config)
 
   def list(
@@ -2691,6 +2762,17 @@ class AsyncBatches(_api_module.BaseModule):
           src="gs://path/to/input/data",
       )
     """
+    parameter_model = types._CreateBatchJobParameters(
+        model=model,
+        src=src,
+        config=config,
+    )
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
     if self._api_client.vertexai:
       if isinstance(src, list):
         raise ValueError(
@@ -2699,6 +2781,65 @@
         )
 
       config = _extra_utils.format_destination(src, config)
+    else:
+      if isinstance(parameter_model.src, list) or (
+          not isinstance(parameter_model.src, str)
+          and parameter_model.src
+          and parameter_model.src.inlined_requests
+      ):
+        # Handle system instruction in InlinedRequests.
+        request_url_dict: Optional[dict[str, str]]
+        request_dict: dict[str, Any] = _CreateBatchJobParameters_to_mldev(
+            self._api_client, parameter_model
+        )
+        request_url_dict = request_dict.get('_url')
+        if request_url_dict:
+          path = '{model}:batchGenerateContent'.format_map(request_url_dict)
+        else:
+          path = '{model}:batchGenerateContent'
+        query_params = request_dict.get('_query')
+        if query_params:
+          path = f'{path}?{urlencode(query_params)}'
+        request_dict.pop('config', None)
+
+        request_dict = _common.convert_to_dict(request_dict)
+        request_dict = _common.encode_unserializable_types(request_dict)
+        # Move system instruction to 'request':
+        # {'systemInstruction': system_instruction}
+        requests = []
+        batch_dict = request_dict.get('batch')
+        if batch_dict and isinstance(batch_dict, dict):
+          input_config_dict = batch_dict.get('inputConfig')
+          if input_config_dict and isinstance(input_config_dict, dict):
+            requests_dict = input_config_dict.get('requests')
+            if requests_dict and isinstance(requests_dict, dict):
+              requests = requests_dict.get('requests')
+        new_requests = []
+        if requests:
+          for req in requests:
+            if req.get('systemInstruction'):
+              value = req.pop('systemInstruction')
+              req['request'].update({'systemInstruction': value})
+            new_requests.append(req)
+          request_dict['batch']['inputConfig']['requests'][  # type: ignore
+              'requests'
+          ] = new_requests
+
+        response = await self._api_client.async_request(
+            'post', path, request_dict, http_options
+        )
+
+        response_dict = '' if not response.body else json.loads(response.body)
+
+        response_dict = _BatchJob_from_mldev(response_dict)
+
+        return_value = types.BatchJob._from_response(
+            response=response_dict, kwargs=parameter_model.model_dump()
+        )
+
+        self._api_client._verify_response(return_value)
+        return return_value
+
     return await self._create(model=model, src=src, config=config)
 
   async def list(
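For the Gemini API (non-Vertex) path with inlined requests, Batches.create and AsyncBatches.create now assemble the batchGenerateContent request themselves and move any top-level systemInstruction on an inlined request into that request's nested 'request' object. A minimal sketch of just that reshaping step follows; the payload below is a hypothetical fragment shaped after the keys used in the diff, not a complete batch request.

from typing import Any

def nest_system_instructions(request_dict: dict[str, Any]) -> dict[str, Any]:
    # Pop each inlined request's top-level systemInstruction and merge it into
    # the nested 'request' dict, as the added branch above does.
    requests = (
        request_dict.get('batch', {})
        .get('inputConfig', {})
        .get('requests', {})
        .get('requests', [])
    )
    new_requests = []
    for req in requests:
        if req.get('systemInstruction'):
            value = req.pop('systemInstruction')
            req['request'].update({'systemInstruction': value})
        new_requests.append(req)
    if new_requests:
        request_dict['batch']['inputConfig']['requests']['requests'] = new_requests
    return request_dict

payload = {
    'batch': {
        'inputConfig': {
            'requests': {
                'requests': [{
                    'request': {'contents': [{'parts': [{'text': 'hi'}]}]},
                    'systemInstruction': {'parts': [{'text': 'Answer briefly.'}]},
                }]
            }
        }
    }
}
print(nest_system_instructions(payload)['batch']['inputConfig']['requests']['requests'][0])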
google/genai/caches.py
CHANGED
@@ -29,6 +29,7 @@ from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+
 logger = logging.getLogger('google_genai.caches')
 
 
@@ -1362,6 +1363,10 @@ def _DeleteCachedContentResponse_from_mldev(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
 
   return to_object
 
@@ -1426,6 +1431,10 @@ def _DeleteCachedContentResponse_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
 
   return to_object
 
@@ -1684,7 +1693,9 @@ class Caches(_api_module.BaseModule):
     return_value = types.DeleteCachedContentResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -2076,7 +2087,9 @@ class AsyncCaches(_api_module.BaseModule):
     return_value = types.DeleteCachedContentResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
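Both Caches.delete and AsyncCaches.delete now populate a sdk_http_response field on the returned DeleteCachedContentResponse with the HTTP headers of the delete call. A hedged usage sketch follows; the cache name is a placeholder, and the client assumes an API key is already configured in the environment.

from google import genai

client = genai.Client()  # assumes an API key is configured in the environment

# 'cachedContents/example-cache' is a placeholder for an existing cache name.
response = client.caches.delete(name='cachedContents/example-cache')

# New in this release: the delete response can carry the HTTP headers it was
# served with.
if response.sdk_http_response is not None:
    print(response.sdk_http_response.headers)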
google/genai/files.py
CHANGED
@@ -31,6 +31,7 @@ from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+
 logger = logging.getLogger('google_genai.files')
 
 
@@ -306,6 +307,10 @@ def _DeleteFileResponse_from_mldev(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
 
   return to_object
 
@@ -576,7 +581,9 @@ class Files(_api_module.BaseModule):
     return_value = types.DeleteFileResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -1058,7 +1065,9 @@ class AsyncFiles(_api_module.BaseModule):
     return_value = types.DeleteFileResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
    )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
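The files module gets the same treatment: Files.delete and AsyncFiles.delete attach the response headers to the returned DeleteFileResponse. A parallel sketch under the same assumptions (placeholder file name, API key configured in the environment):

from google import genai

client = genai.Client()

# 'files/example-file' is a placeholder for a previously uploaded file name.
result = client.files.delete(name='files/example-file')
if result.sdk_http_response is not None:
    print(result.sdk_http_response.headers)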