google-genai 1.19.0__py3-none-any.whl → 1.21.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +449 -137
- google/genai/_common.py +88 -1
- google/genai/_live_converters.py +174 -414
- google/genai/_replay_api_client.py +10 -9
- google/genai/_tokens_converters.py +81 -176
- google/genai/_transformers.py +19 -40
- google/genai/batches.py +47 -64
- google/genai/caches.py +132 -222
- google/genai/chats.py +9 -14
- google/genai/client.py +1 -1
- google/genai/errors.py +32 -6
- google/genai/files.py +89 -103
- google/genai/live.py +15 -20
- google/genai/live_music.py +4 -5
- google/genai/models.py +412 -558
- google/genai/operations.py +36 -68
- google/genai/tokens.py +11 -6
- google/genai/tunings.py +65 -113
- google/genai/types.py +305 -92
- google/genai/version.py +1 -1
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/METADATA +47 -1
- google_genai-1.21.0.dist-info/RECORD +35 -0
- google_genai-1.19.0.dist-info/RECORD +0 -35
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/WHEEL +0 -0
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/top_level.txt +0 -0
google/genai/_transformers.py
CHANGED
@@ -195,7 +195,6 @@ def t_models_url(
 
 
 def t_extract_models(
-    api_client: _api_client.BaseApiClient,
     response: dict[str, Any],
 ) -> list[dict[str, Any]]:
   if not response:
@@ -299,18 +298,15 @@ def t_function_responses(
 
 
 def t_blobs(
-    api_client: _api_client.BaseApiClient,
     blobs: Union[types.BlobImageUnionDict, list[types.BlobImageUnionDict]],
 ) -> list[types.Blob]:
   if isinstance(blobs, list):
-    return [t_blob(api_client, blob) for blob in blobs]
+    return [t_blob(blob) for blob in blobs]
   else:
-    return [t_blob(api_client, blobs)]
+    return [t_blob(blobs)]
 
 
-def t_blob(
-    api_client: _api_client.BaseApiClient, blob: types.BlobImageUnionDict
-) -> types.Blob:
+def t_blob(blob: types.BlobImageUnionDict) -> types.Blob:
   try:
     import PIL.Image
 
@@ -335,19 +331,15 @@ def t_blob(
   )
 
 
-def t_image_blob(
-    api_client: _api_client.BaseApiClient, blob: types.BlobImageUnionDict
-) -> types.Blob:
-  blob = t_blob(api_client, blob)
+def t_image_blob(blob: types.BlobImageUnionDict) -> types.Blob:
+  blob = t_blob(blob)
   if blob.mime_type and blob.mime_type.startswith('image/'):
     return blob
   raise ValueError(f'Unsupported mime type: {blob.mime_type!r}')
 
 
-def t_audio_blob(
-    api_client: _api_client.BaseApiClient, blob: types.BlobOrDict
-) -> types.Blob:
-  blob = t_blob(api_client, blob)
+def t_audio_blob(blob: types.BlobOrDict) -> types.Blob:
+  blob = t_blob(blob)
   if blob.mime_type and blob.mime_type.startswith('audio/'):
     return blob
   raise ValueError(f'Unsupported mime type: {blob.mime_type!r}')
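Every hunk in this file follows the same pattern: the transformers stop threading a BaseApiClient argument they never used and become plain functions of their input. A rough sketch of what that means for calling code, using a toy stand-in rather than the SDK's actual classes (the dict payload and helper bodies below are illustrative assumptions, not the library source):

def t_blob(blob: dict) -> dict:
  # Toy stand-in: the real helper builds a types.Blob from a dict, raw bytes,
  # or a PIL image; here the mapping is simply passed through.
  return blob

def t_image_blob(blob: dict) -> dict:
  # 1.19.0 callers wrote t_image_blob(api_client, blob); 1.21.0 drops the client.
  blob = t_blob(blob)
  mime = blob.get('mime_type', '')
  if mime.startswith('image/'):
    return blob
  raise ValueError(f'Unsupported mime type: {mime!r}')

print(t_image_blob({'mime_type': 'image/png', 'data': b'...'}))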
@@ -393,7 +385,6 @@ def t_parts(
 
 
 def t_image_predictions(
-    client: _api_client.BaseApiClient,
     predictions: Optional[Iterable[Mapping[str, Any]]],
 ) -> Optional[list[types.GeneratedImage]]:
   if not predictions:
@@ -416,7 +407,6 @@ ContentType = Union[types.Content, types.ContentDict, types.PartUnionDict]
 
 
 def t_content(
-    client: _api_client.BaseApiClient,
     content: Optional[ContentType],
 ) -> types.Content:
   if content is None:
@@ -447,9 +437,9 @@ def t_contents_for_embed(
     contents: Union[list[types.Content], list[types.ContentDict], ContentType],
 ) -> Union[list[str], list[types.Content]]:
   if isinstance(contents, list):
-    transformed_contents = [t_content(client, content) for content in contents]
+    transformed_contents = [t_content(content) for content in contents]
   else:
-    transformed_contents = [t_content(client, contents)]
+    transformed_contents = [t_content(contents)]
 
   if client.vertexai:
     text_parts = []
@@ -469,7 +459,6 @@ def t_contents_for_embed(
 
 
 def t_contents(
-    client: _api_client.BaseApiClient,
     contents: Optional[
         Union[types.ContentListUnion, types.ContentListUnionDict, types.Content]
     ],
@@ -477,7 +466,7 @@ def t_contents(
   if contents is None or (isinstance(contents, list) and not contents):
     raise ValueError('contents are required.')
   if not isinstance(contents, list):
-    return [t_content(client, contents)]
+    return [t_content(contents)]
 
   try:
     import PIL.Image
@@ -635,14 +624,13 @@ def _raise_for_unsupported_schema_type(origin: Any) -> None:
   raise ValueError(f'Unsupported schema type: {origin}')
 
 
-def _raise_for_unsupported_mldev_properties(schema: Any, client: _api_client.BaseApiClient) -> None:
+def _raise_for_unsupported_mldev_properties(
+    schema: Any, client: _api_client.BaseApiClient
+) -> None:
   if not client.vertexai and (
-      schema.get('additionalProperties')
-      or schema.get('additional_properties')
+      schema.get('additionalProperties') or schema.get('additional_properties')
   ):
-    raise ValueError(
-        'additionalProperties is not supported in the Gemini API.'
-    )
+    raise ValueError('additionalProperties is not supported in the Gemini API.')
 
 
 def process_schema(
@@ -872,7 +860,6 @@ def t_schema(
 
 
 def t_speech_config(
-    _: _api_client.BaseApiClient,
     origin: Union[types.SpeechConfigUnionDict, Any],
 ) -> Optional[types.SpeechConfig]:
   if not origin:
@@ -892,7 +879,6 @@ def t_speech_config(
 
 
 def t_live_speech_config(
-    client: _api_client.BaseApiClient,
     origin: types.SpeechConfigOrDict,
 ) -> Optional[types.SpeechConfig]:
   if isinstance(origin, types.SpeechConfig):
@@ -958,9 +944,7 @@ def t_cached_content_name(client: _api_client.BaseApiClient, name: str) -> str:
   return _resource_name(client, name, collection_identifier='cachedContents')
 
 
-def t_batch_job_source(
-    client: _api_client.BaseApiClient, src: str
-) -> types.BatchJobSource:
+def t_batch_job_source(src: str) -> types.BatchJobSource:
   if src.startswith('gs://'):
     return types.BatchJobSource(
         format='jsonl',
@@ -975,9 +959,7 @@ def t_batch_job_source(
   raise ValueError(f'Unsupported source: {src}')
 
 
-def t_batch_job_destination(
-    client: _api_client.BaseApiClient, dest: str
-) -> types.BatchJobDestination:
+def t_batch_job_destination(dest: str) -> types.BatchJobDestination:
   if dest.startswith('gs://'):
     return types.BatchJobDestination(
         format='jsonl',
@@ -1042,7 +1024,6 @@ def t_resolve_operation(
 
 
 def t_file_name(
-    api_client: _api_client.BaseApiClient,
     name: Optional[Union[str, types.File, types.Video, types.GeneratedVideo]],
 ) -> str:
   # Remove the files/ prefix since it's added to the url path.
@@ -1076,9 +1057,7 @@ def t_file_name(
   return name
 
 
-def t_tuning_job_status(
-    api_client: _api_client.BaseApiClient, status: str
-) -> Union[types.JobState, str]:
+def t_tuning_job_status(status: str) -> Union[types.JobState, str]:
   if status == 'STATE_UNSPECIFIED':
     return types.JobState.JOB_STATE_UNSPECIFIED
   elif status == 'CREATING':
@@ -1098,7 +1077,7 @@ def t_tuning_job_status(
 # We shouldn't use this transformer if the backend adhere to Cloud Type
 # format https://cloud.google.com/docs/discovery/type-format.
 # TODO(b/389133914,b/390320301): Remove the hack after backend fix the issue.
-def t_bytes(api_client: _api_client.BaseApiClient, data: bytes) -> str:
+def t_bytes(data: bytes) -> str:
   if not isinstance(data, bytes):
     return data
   return base64.b64encode(data).decode('ascii')
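The final hunk trims t_bytes down to a pure helper as well. Its body is unchanged in the context lines: non-bytes values pass through and bytes are base64-encoded to an ASCII string, which is the workaround the TODO above refers to. A small self-contained illustration of that behavior (the sample inputs are made up):

import base64

def t_bytes(data: bytes) -> str:
  # Mirrors the context lines of the hunk: pass non-bytes values through,
  # base64-encode bytes into an ASCII string.
  if not isinstance(data, bytes):
    return data
  return base64.b64encode(data).decode('ascii')

print(t_bytes(b'hello'))            # aGVsbG8=
print(t_bytes('already-a-string'))  # unchanged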
google/genai/batches.py
CHANGED
@@ -15,9 +15,11 @@
 
 # Code generated by the Google Gen AI SDK generator DO NOT EDIT.
 
+import json
 import logging
 from typing import Any, Optional, Union
 from urllib.parse import urlencode
+
 from . import _api_module
 from . import _common
 from . import _extra_utils
@@ -32,7 +34,6 @@ logger = logging.getLogger('google_genai.batches')
 
 
 def _BatchJobSource_to_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -54,7 +55,6 @@ def _BatchJobSource_to_vertex(
 
 
 def _BatchJobDestination_to_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -80,7 +80,6 @@ def _BatchJobDestination_to_vertex(
 
 
 def _CreateBatchJobConfig_to_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -94,9 +93,7 @@ def _CreateBatchJobConfig_to_vertex(
         parent_object,
         ['outputConfig'],
         _BatchJobDestination_to_vertex(
-            api_client,
-            t.t_batch_job_destination(api_client, getv(from_object, ['dest'])),
-            to_object,
+            t.t_batch_job_destination(getv(from_object, ['dest'])), to_object
         ),
     )
 
@@ -121,9 +118,7 @@ def _CreateBatchJobParameters_to_vertex(
         to_object,
         ['inputConfig'],
         _BatchJobSource_to_vertex(
-            api_client,
-            t.t_batch_job_source(api_client, getv(from_object, ['src'])),
-            to_object,
+            t.t_batch_job_source(getv(from_object, ['src'])), to_object
         ),
     )
 
@@ -132,7 +127,7 @@ def _CreateBatchJobParameters_to_vertex(
         to_object,
         ['config'],
         _CreateBatchJobConfig_to_vertex(
-            api_client, getv(from_object, ['config']), to_object
+            getv(from_object, ['config']), to_object
         ),
     )
 
@@ -178,7 +173,6 @@ def _CancelBatchJobParameters_to_vertex(
 
 
 def _ListBatchJobsConfig_to_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -203,7 +197,6 @@ def _ListBatchJobsConfig_to_vertex(
 
 
 def _ListBatchJobsParameters_to_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -213,7 +206,7 @@ def _ListBatchJobsParameters_to_vertex(
         to_object,
         ['config'],
         _ListBatchJobsConfig_to_vertex(
-            api_client, getv(from_object, ['config']), to_object
+            getv(from_object, ['config']), to_object
         ),
     )
 
@@ -240,7 +233,6 @@ def _DeleteBatchJobParameters_to_vertex(
 
 
 def _JobError_from_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -258,7 +250,6 @@ def _JobError_from_vertex(
 
 
 def _BatchJobSource_from_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -280,7 +271,6 @@ def _BatchJobSource_from_vertex(
 
 
 def _BatchJobDestination_from_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -306,7 +296,6 @@ def _BatchJobDestination_from_vertex(
 
 
 def _BatchJob_from_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -324,9 +313,7 @@ def _BatchJob_from_vertex(
     setv(
         to_object,
         ['error'],
-        _JobError_from_vertex(
-            api_client, getv(from_object, ['error']), to_object
-        ),
+        _JobError_from_vertex(getv(from_object, ['error']), to_object),
     )
 
   if getv(from_object, ['createTime']) is not None:
@@ -349,7 +336,7 @@ def _BatchJob_from_vertex(
         to_object,
         ['src'],
         _BatchJobSource_from_vertex(
-            api_client, getv(from_object, ['inputConfig']), to_object
+            getv(from_object, ['inputConfig']), to_object
         ),
     )
 
@@ -358,7 +345,7 @@ def _BatchJob_from_vertex(
         to_object,
         ['dest'],
         _BatchJobDestination_from_vertex(
-            api_client, getv(from_object, ['outputConfig']), to_object
+            getv(from_object, ['outputConfig']), to_object
         ),
     )
 
@@ -366,7 +353,6 @@ def _BatchJob_from_vertex(
 
 
 def _ListBatchJobsResponse_from_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -379,7 +365,7 @@ def _ListBatchJobsResponse_from_vertex(
         to_object,
         ['batch_jobs'],
         [
-            _BatchJob_from_vertex(api_client, item, to_object)
+            _BatchJob_from_vertex(item, to_object)
             for item in getv(from_object, ['batchPredictionJobs'])
         ],
     )
@@ -388,7 +374,6 @@ def _ListBatchJobsResponse_from_vertex(
 
 
 def _DeleteResourceJob_from_vertex(
-    api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
@@ -403,9 +388,7 @@ def _DeleteResourceJob_from_vertex(
     setv(
         to_object,
         ['error'],
-        _JobError_from_vertex(
-            api_client, getv(from_object, ['error']), to_object
-        ),
+        _JobError_from_vertex(getv(from_object, ['error']), to_object),
     )
 
   return to_object
@@ -455,12 +438,14 @@ class Batches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
+    response = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
+    response_dict = '' if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
+      response_dict = _BatchJob_from_vertex(response_dict)
 
     return_value = types.BatchJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -523,12 +508,12 @@ class Batches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
-        'get', path, request_dict, http_options
-    )
+    response = self._api_client.request('get', path, request_dict, http_options)
+
+    response_dict = '' if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
+      response_dict = _BatchJob_from_vertex(response_dict)
 
     return_value = types.BatchJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -592,7 +577,7 @@ class Batches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    self._api_client.request(
+    response = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
@@ -607,9 +592,7 @@ class Batches(_api_module.BaseModule):
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
-      request_dict = _ListBatchJobsParameters_to_vertex(
-          self._api_client, parameter_model
-      )
+      request_dict = _ListBatchJobsParameters_to_vertex(parameter_model)
     request_url_dict = request_dict.get('_url')
     if request_url_dict:
       path = 'batchPredictionJobs'.format_map(request_url_dict)
@@ -632,14 +615,12 @@ class Batches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
-        'get', path, request_dict, http_options
-    )
+    response = self._api_client.request('get', path, request_dict, http_options)
+
+    response_dict = '' if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
-      response_dict = _ListBatchJobsResponse_from_vertex(
-          self._api_client, response_dict
-      )
+      response_dict = _ListBatchJobsResponse_from_vertex(response_dict)
 
     return_value = types.ListBatchJobsResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -704,14 +685,14 @@ class Batches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
+    response = self._api_client.request(
         'delete', path, request_dict, http_options
     )
 
+    response_dict = '' if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _DeleteResourceJob_from_vertex(
-          self._api_client, response_dict
-      )
+      response_dict = _DeleteResourceJob_from_vertex(response_dict)
 
     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -825,12 +806,14 @@ class AsyncBatches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
        'post', path, request_dict, http_options
    )
 
+    response_dict = '' if not response.body else json.loads(response.body)
+
    if self._api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
+      response_dict = _BatchJob_from_vertex(response_dict)
 
    return_value = types.BatchJob._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
@@ -893,12 +876,14 @@ class AsyncBatches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
+    response_dict = '' if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
+      response_dict = _BatchJob_from_vertex(response_dict)
 
     return_value = types.BatchJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -962,7 +947,7 @@ class AsyncBatches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'post', path, request_dict, http_options
     )
 
@@ -977,9 +962,7 @@ class AsyncBatches(_api_module.BaseModule):
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
-      request_dict = _ListBatchJobsParameters_to_vertex(
-          self._api_client, parameter_model
-      )
+      request_dict = _ListBatchJobsParameters_to_vertex(parameter_model)
     request_url_dict = request_dict.get('_url')
     if request_url_dict:
       path = 'batchPredictionJobs'.format_map(request_url_dict)
@@ -1002,14 +985,14 @@ class AsyncBatches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
+    response_dict = '' if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _ListBatchJobsResponse_from_vertex(
-          self._api_client, response_dict
-      )
+      response_dict = _ListBatchJobsResponse_from_vertex(response_dict)
 
     return_value = types.ListBatchJobsResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -1074,14 +1057,14 @@ class AsyncBatches(_api_module.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'delete', path, request_dict, http_options
     )
 
+    response_dict = '' if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _DeleteResourceJob_from_vertex(
-          self._api_client, response_dict
-      )
+      response_dict = _DeleteResourceJob_from_vertex(response_dict)
 
     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
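Two changes repeat across Batches and AsyncBatches: the Vertex converters are called without the api_client argument, and the generated methods now receive a response object and decode its body themselves with json.loads (hence the new import json), falling back to an empty string when there is no body. A minimal sketch of that decode step under those assumptions; the response class and field values below are invented for illustration:

import json
from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class FakeResponse:
  # Stand-in for whatever BaseApiClient.request returns in 1.21.0;
  # only the .body attribute used by the generated code is modeled here.
  body: Optional[str] = None

def decode_body(response: FakeResponse) -> Any:
  # Same guard as the generated methods: an empty body becomes ''
  # instead of raising json.JSONDecodeError.
  return '' if not response.body else json.loads(response.body)

print(decode_body(FakeResponse('{"name": "batchPredictionJobs/123", "state": "JOB_STATE_SUCCEEDED"}')))
print(repr(decode_body(FakeResponse())))  # -> ''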