google-genai 1.7.0__py3-none-any.whl → 1.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +93 -78
- google/genai/_replay_api_client.py +22 -14
- google/genai/_transformers.py +81 -16
- google/genai/batches.py +61 -295
- google/genai/caches.py +546 -526
- google/genai/chats.py +15 -8
- google/genai/client.py +5 -3
- google/genai/errors.py +47 -24
- google/genai/files.py +89 -305
- google/genai/live.py +466 -12
- google/genai/models.py +1992 -2291
- google/genai/operations.py +104 -124
- google/genai/tunings.py +256 -272
- google/genai/types.py +394 -98
- google/genai/version.py +1 -1
- {google_genai-1.7.0.dist-info → google_genai-1.9.0.dist-info}/METADATA +3 -2
- google_genai-1.9.0.dist-info/RECORD +27 -0
- {google_genai-1.7.0.dist-info → google_genai-1.9.0.dist-info}/WHEEL +1 -1
- google_genai-1.7.0.dist-info/RECORD +0 -27
- {google_genai-1.7.0.dist-info → google_genai-1.9.0.dist-info/licenses}/LICENSE +0 -0
- {google_genai-1.7.0.dist-info → google_genai-1.9.0.dist-info}/top_level.txt +0 -0
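The version bump itself is the one-line change in google/genai/version.py. A quick way to confirm which side of this diff an environment is on, assuming the package re-exports `__version__` as recent releases do:

```python
from google import genai

# Prints '1.9.0' after upgrading, '1.7.0' before.
print(genai.__version__)
```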
google/genai/batches.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -31,24 +31,6 @@ from .pagers import AsyncPager, Pager
 logger = logging.getLogger('google_genai.batches')


-def _BatchJobSource_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['format']) is not None:
-    raise ValueError('format parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['gcs_uri']) is not None:
-    raise ValueError('gcs_uri parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['bigquery_uri']) is not None:
-    raise ValueError('bigquery_uri parameter is not supported in Gemini API.')
-
-  return to_object
-
-
 def _BatchJobSource_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -71,24 +53,6 @@ def _BatchJobSource_to_vertex(
   return to_object


-def _BatchJobDestination_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['format']) is not None:
-    raise ValueError('format parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['gcs_uri']) is not None:
-    raise ValueError('gcs_uri parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['bigquery_uri']) is not None:
-    raise ValueError('bigquery_uri parameter is not supported in Gemini API.')
-
-  return to_object
-
-
 def _BatchJobDestination_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -115,22 +79,6 @@ def _BatchJobDestination_to_vertex(
   return to_object


-def _CreateBatchJobConfig_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-
-  if getv(from_object, ['display_name']) is not None:
-    setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
-
-  if getv(from_object, ['dest']) is not None:
-    raise ValueError('dest parameter is not supported in Gemini API.')
-
-  return to_object
-
-
 def _CreateBatchJobConfig_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
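All of these converters, removed and kept alike, are built from the module's `getv`/`setv` path helpers (imported near the top of batches.py from the package's `_common` module). A simplified, self-contained sketch of that pattern, not the SDK's actual implementation, assuming only the behavior visible in the converters above:

```python
from typing import Any, Optional, Union


def getv(obj: Union[dict, object], path: list[str]) -> Optional[Any]:
  """Walk a key path through dicts/attributes; return None if any hop is missing."""
  for key in path:
    if obj is None:
      return None
    obj = obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None)
  return obj


def setv(obj: dict, path: list[str], value: Any) -> None:
  """Create nested dicts along the path and set the final key."""
  for key in path[:-1]:
    obj = obj.setdefault(key, {})
  obj[path[-1]] = value


# Shape of the displayName mapping that the Vertex converter keeps:
body: dict[str, Any] = {}
setv(body, ['displayName'], getv({'display_name': 'nightly-batch'}, ['display_name']))
assert body == {'displayName': 'nightly-batch'}
```

The deleted `*_to_mldev` variants used the same helpers but mostly raised `ValueError` for fields the Gemini Developer API does not accept, so removing them drops only those error paths.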
@@ -155,30 +103,6 @@ def _CreateBatchJobConfig_to_vertex(
   return to_object


-def _CreateBatchJobParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['model']) is not None:
-    raise ValueError('model parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['src']) is not None:
-    raise ValueError('src parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['config']) is not None:
-    setv(
-        to_object,
-        ['config'],
-        _CreateBatchJobConfig_to_mldev(
-            api_client, getv(from_object, ['config']), to_object
-        ),
-    )
-
-  return to_object
-
-
 def _CreateBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -215,21 +139,6 @@ def _CreateBatchJobParameters_to_vertex(
   return to_object


-def _GetBatchJobParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['name']) is not None:
-    raise ValueError('name parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
-
-  return to_object
-
-
 def _GetBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -249,21 +158,6 @@ def _GetBatchJobParameters_to_vertex(
   return to_object


-def _CancelBatchJobParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['name']) is not None:
-    raise ValueError('name parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
-
-  return to_object
-
-
 def _CancelBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -283,31 +177,6 @@ def _CancelBatchJobParameters_to_vertex(
   return to_object


-def _ListBatchJobsConfig_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-
-  if getv(from_object, ['page_size']) is not None:
-    setv(
-        parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
-    )
-
-  if getv(from_object, ['page_token']) is not None:
-    setv(
-        parent_object,
-        ['_query', 'pageToken'],
-        getv(from_object, ['page_token']),
-    )
-
-  if getv(from_object, ['filter']) is not None:
-    raise ValueError('filter parameter is not supported in Gemini API.')
-
-  return to_object
-
-
 def _ListBatchJobsConfig_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -333,18 +202,6 @@ def _ListBatchJobsConfig_to_vertex(
   return to_object


-def _ListBatchJobsParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['config']) is not None:
-    raise ValueError('config parameter is not supported in Gemini API.')
-
-  return to_object
-
-
 def _ListBatchJobsParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -363,21 +220,6 @@ def _ListBatchJobsParameters_to_vertex(
   return to_object


-def _DeleteBatchJobParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['name']) is not None:
-    raise ValueError('name parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
-
-  return to_object
-
-
 def _DeleteBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -397,16 +239,6 @@ def _DeleteBatchJobParameters_to_vertex(
   return to_object


-def _JobError_from_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-
-  return to_object
-
-
 def _JobError_from_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -425,16 +257,6 @@ def _JobError_from_vertex(
   return to_object


-def _BatchJobSource_from_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-
-  return to_object
-
-
 def _BatchJobSource_from_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -457,16 +279,6 @@ def _BatchJobSource_from_vertex(
   return to_object


-def _BatchJobDestination_from_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-
-  return to_object
-
-
 def _BatchJobDestination_from_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -493,16 +305,6 @@ def _BatchJobDestination_from_vertex(
   return to_object


-def _BatchJob_from_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-
-  return to_object
-
-
 def _BatchJob_from_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -563,18 +365,6 @@ def _BatchJob_from_vertex(
   return to_object


-def _ListBatchJobsResponse_from_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['nextPageToken']) is not None:
-    setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
-
-  return to_object
-
-
 def _ListBatchJobsResponse_from_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -597,16 +387,6 @@ def _ListBatchJobsResponse_from_vertex(
   return to_object


-def _DeleteResourceJob_from_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-
-  return to_object
-
-
 def _DeleteResourceJob_from_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict, object],
@@ -665,11 +445,12 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
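The same `config is not None and config.http_options is not None` check replaces the old extraction logic in every `Batches` and `AsyncBatches` method below. From the caller's side, per-request HTTP options ride along on the batch config; a hedged usage sketch in which project, location, model, and table names are placeholders and batch jobs go through a Vertex AI client:

```python
from google import genai
from google.genai import types

# Placeholders: batch prediction requires the Vertex AI client.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

job = client.batches.create(
    model='gemini-2.0-flash-001',
    src='bq://my-project.my_dataset.prompts',
    config=types.CreateBatchJobConfig(
        display_name='nightly-batch',
        # Picked up by the new None-check shown in this hunk.
        http_options=types.HttpOptions(timeout=60_000),
    ),
)
print(job.name, job.state)
```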
@@ -680,8 +461,6 @@ class Batches(_api_module.BaseModule):

     if self._api_client.vertexai:
       response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -734,11 +513,12 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -749,8 +529,6 @@ class Batches(_api_module.BaseModule):

     if self._api_client.vertexai:
       response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -804,11 +582,12 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -843,11 +622,12 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -860,10 +640,6 @@ class Batches(_api_module.BaseModule):
       response_dict = _ListBatchJobsResponse_from_vertex(
           self._api_client, response_dict
       )
-    else:
-      response_dict = _ListBatchJobsResponse_from_mldev(
-          self._api_client, response_dict
-      )

     return_value = types.ListBatchJobsResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -918,11 +694,12 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -935,10 +712,6 @@ class Batches(_api_module.BaseModule):
       response_dict = _DeleteResourceJob_from_vertex(
           self._api_client, response_dict
       )
-    else:
-      response_dict = _DeleteResourceJob_from_mldev(
-          self._api_client, response_dict
-      )

     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -1042,11 +815,12 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -1057,8 +831,6 @@ class AsyncBatches(_api_module.BaseModule):

     if self._api_client.vertexai:
       response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -1111,11 +883,12 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -1126,8 +899,6 @@ class AsyncBatches(_api_module.BaseModule):

     if self._api_client.vertexai:
       response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -1181,11 +952,12 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -1220,11 +992,12 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -1237,10 +1010,6 @@ class AsyncBatches(_api_module.BaseModule):
       response_dict = _ListBatchJobsResponse_from_vertex(
           self._api_client, response_dict
      )
-    else:
-      response_dict = _ListBatchJobsResponse_from_mldev(
-          self._api_client, response_dict
-      )

     return_value = types.ListBatchJobsResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -1295,11 +1064,12 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options: Optional[types.
-    if
-
-
-
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options

     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
@@ -1312,10 +1082,6 @@ class AsyncBatches(_api_module.BaseModule):
       response_dict = _DeleteResourceJob_from_vertex(
           self._api_client, response_dict
       )
-    else:
-      response_dict = _DeleteResourceJob_from_mldev(
-          self._api_client, response_dict
-      )

     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
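The removed `else:` branches were the Gemini API fallbacks in the response-parsing paths; with them and the `*_from_mldev` converters gone, batch responses are handled solely by the Vertex converters. Continuing the sketch above, the remaining list/cancel/delete surface is driven the same way (all names are placeholders):

```python
# Continues the `client` and `job` objects from the earlier sketch.
for item in client.batches.list(config=types.ListBatchJobsConfig(page_size=10)):
    print(item.name, item.display_name, item.state)

client.batches.cancel(name=job.name)
client.batches.delete(name=job.name)
```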