google-genai 1.3.0__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- google/genai/_api_client.py +30 -26
- google/genai/_api_module.py +1 -1
- google/genai/_automatic_function_calling_util.py +12 -12
- google/genai/_common.py +2 -2
- google/genai/_extra_utils.py +7 -6
- google/genai/_replay_api_client.py +3 -2
- google/genai/_test_api_client.py +8 -8
- google/genai/_transformers.py +158 -47
- google/genai/batches.py +170 -124
- google/genai/caches.py +306 -206
- google/genai/chats.py +179 -35
- google/genai/client.py +3 -3
- google/genai/errors.py +1 -2
- google/genai/files.py +153 -110
- google/genai/live.py +73 -64
- google/genai/models.py +828 -591
- google/genai/operations.py +86 -56
- google/genai/pagers.py +5 -5
- google/genai/tunings.py +163 -103
- google/genai/types.py +174 -157
- google/genai/version.py +1 -1
- {google_genai-1.3.0.dist-info → google_genai-1.4.0.dist-info}/METADATA +7 -1
- google_genai-1.4.0.dist-info/RECORD +27 -0
- {google_genai-1.3.0.dist-info → google_genai-1.4.0.dist-info}/WHEEL +1 -1
- google_genai-1.3.0.dist-info/RECORD +0 -27
- {google_genai-1.3.0.dist-info → google_genai-1.4.0.dist-info}/LICENSE +0 -0
- {google_genai-1.3.0.dist-info → google_genai-1.4.0.dist-info}/top_level.txt +0 -0
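
Most of the churn in these files is a single mechanical typing cleanup applied to every generated converter: the `ApiClient` class is renamed `BaseApiClient`, implicit-`Optional` defaults such as `parent_object: dict = None` become explicit `Optional[dict] = None`, and bare `to_object = {}` literals gain a `dict[str, Any]` annotation. A minimal sketch of the before/after pattern — the converter names and bodies here are illustrative stand-ins, not the SDK's code:

```python
from typing import Any, Optional, Union


# 1.3.0 style: `dict = None` is an implicit Optional, which PEP 484 no
# longer blesses and strict checkers (e.g. mypy with no_implicit_optional)
# reject; the result dict is also left unannotated.
def convert_old(
    from_object: Union[dict, object],
    parent_object: dict = None,  # type: ignore[assignment]
) -> dict:
    to_object = {}  # inferred as dict[Any, Any]
    return to_object


# 1.4.0 style: the default is an explicit Optional and the result dict
# carries a precise value type.
def convert_new(
    from_object: Union[dict, object],
    parent_object: Optional[dict] = None,
) -> dict:
    to_object: dict[str, Any] = {}
    return to_object
```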
google/genai/batches.py
CHANGED
@@ -16,14 +16,14 @@
 # Code generated by the Google Gen AI SDK generator DO NOT EDIT.

 import logging
-from typing import Optional, Union
+from typing import Any, Optional, Union
 from urllib.parse import urlencode
 from . import _api_module
 from . import _common
 from . import _extra_utils
 from . import _transformers as t
 from . import types
-from ._api_client import ApiClient
+from ._api_client import BaseApiClient
 from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
@@ -32,11 +32,11 @@ logger = logging.getLogger('google_genai.batches')


 def _BatchJobSource_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['format']) is not None:
     raise ValueError('format parameter is not supported in Gemini API.')

@@ -50,11 +50,11 @@ def _BatchJobSource_to_mldev(


 def _BatchJobSource_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['format']) is not None:
     setv(to_object, ['instancesFormat'], getv(from_object, ['format']))

@@ -72,11 +72,11 @@ def _BatchJobSource_to_vertex(


 def _BatchJobDestination_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['format']) is not None:
     raise ValueError('format parameter is not supported in Gemini API.')

@@ -90,11 +90,11 @@ def _BatchJobDestination_to_mldev(


 def _BatchJobDestination_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['format']) is not None:
     setv(to_object, ['predictionsFormat'], getv(from_object, ['format']))

@@ -116,11 +116,11 @@ def _BatchJobDestination_to_vertex(


 def _CreateBatchJobConfig_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
@@ -132,11 +132,11 @@ def _CreateBatchJobConfig_to_mldev(


 def _CreateBatchJobConfig_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
@@ -156,11 +156,11 @@ def _CreateBatchJobConfig_to_vertex(


 def _CreateBatchJobParameters_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     raise ValueError('model parameter is not supported in Gemini API.')

@@ -180,11 +180,11 @@ def _CreateBatchJobParameters_to_mldev(


 def _CreateBatchJobParameters_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -216,11 +216,11 @@ def _CreateBatchJobParameters_to_vertex(


 def _GetBatchJobParameters_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Gemini API.')

@@ -231,11 +231,11 @@ def _GetBatchJobParameters_to_mldev(


 def _GetBatchJobParameters_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
         to_object,
@@ -250,11 +250,11 @@ def _GetBatchJobParameters_to_vertex(


 def _CancelBatchJobParameters_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Gemini API.')

@@ -265,11 +265,11 @@ def _CancelBatchJobParameters_to_mldev(


 def _CancelBatchJobParameters_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
         to_object,
@@ -284,11 +284,11 @@ def _CancelBatchJobParameters_to_vertex(


 def _ListBatchJobsConfig_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['page_size']) is not None:
     setv(
@@ -309,11 +309,11 @@ def _ListBatchJobsConfig_to_mldev(


 def _ListBatchJobsConfig_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['page_size']) is not None:
     setv(
@@ -334,11 +334,11 @@ def _ListBatchJobsConfig_to_vertex(


 def _ListBatchJobsParameters_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['config']) is not None:
     raise ValueError('config parameter is not supported in Gemini API.')

@@ -346,11 +346,11 @@ def _ListBatchJobsParameters_to_mldev(


 def _ListBatchJobsParameters_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -364,11 +364,11 @@ def _ListBatchJobsParameters_to_vertex(


 def _DeleteBatchJobParameters_to_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Gemini API.')

@@ -379,11 +379,11 @@ def _DeleteBatchJobParameters_to_mldev(


 def _DeleteBatchJobParameters_to_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
         to_object,
@@ -398,21 +398,21 @@ def _DeleteBatchJobParameters_to_vertex(


 def _JobError_from_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   return to_object


 def _JobError_from_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['details']) is not None:
     setv(to_object, ['details'], getv(from_object, ['details']))

@@ -426,21 +426,21 @@ def _JobError_from_vertex(


 def _BatchJobSource_from_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   return to_object


 def _BatchJobSource_from_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['instancesFormat']) is not None:
     setv(to_object, ['format'], getv(from_object, ['instancesFormat']))

@@ -458,21 +458,21 @@ def _BatchJobSource_from_vertex(


 def _BatchJobDestination_from_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   return to_object


 def _BatchJobDestination_from_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['predictionsFormat']) is not None:
     setv(to_object, ['format'], getv(from_object, ['predictionsFormat']))

@@ -494,21 +494,21 @@ def _BatchJobDestination_from_vertex(


 def _BatchJob_from_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   return to_object


 def _BatchJob_from_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(to_object, ['name'], getv(from_object, ['name']))

@@ -564,11 +564,11 @@ def _BatchJob_from_vertex(


 def _ListBatchJobsResponse_from_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['nextPageToken']) is not None:
     setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))

@@ -576,11 +576,11 @@ def _ListBatchJobsResponse_from_mldev(


 def _ListBatchJobsResponse_from_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['nextPageToken']) is not None:
     setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))

@@ -598,21 +598,21 @@ def _ListBatchJobsResponse_from_vertex(


 def _DeleteResourceJob_from_mldev(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   return to_object


 def _DeleteResourceJob_from_vertex(
-    api_client: ApiClient,
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(to_object, ['name'], getv(from_object, ['name']))

@@ -646,13 +646,18 @@ class Batches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CreateBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs'

     query_params = request_dict.get('_query')
     if query_params:
@@ -660,7 +665,7 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -679,7 +684,7 @@ class Batches(_api_module.BaseModule):
     response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -710,13 +715,18 @@ class Batches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _GetBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs/{name}'

     query_params = request_dict.get('_query')
     if query_params:
@@ -724,7 +734,7 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -743,7 +753,7 @@ class Batches(_api_module.BaseModule):
     response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -775,15 +785,18 @@ class Batches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CancelBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
-
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs/{name}:cancel'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs/{name}:cancel'

     query_params = request_dict.get('_query')
     if query_params:
@@ -791,7 +804,7 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -811,13 +824,18 @@ class Batches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _ListBatchJobsParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs'

     query_params = request_dict.get('_query')
     if query_params:
@@ -825,7 +843,7 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -848,7 +866,7 @@ class Batches(_api_module.BaseModule):
     )

     return_value = types.ListBatchJobsResponse._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -881,13 +899,18 @@ class Batches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _DeleteBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs/{name}'

     query_params = request_dict.get('_query')
     if query_params:
@@ -895,7 +918,7 @@ class Batches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
    if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -918,7 +941,7 @@ class Batches(_api_module.BaseModule):
     )

     return_value = types.DeleteResourceJob._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -998,13 +1021,18 @@ class AsyncBatches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CreateBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs'

     query_params = request_dict.get('_query')
     if query_params:
@@ -1012,7 +1040,7 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -1031,7 +1059,7 @@ class AsyncBatches(_api_module.BaseModule):
     response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -1062,13 +1090,18 @@ class AsyncBatches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _GetBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs/{name}'

     query_params = request_dict.get('_query')
     if query_params:
@@ -1076,7 +1109,7 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -1095,7 +1128,7 @@ class AsyncBatches(_api_module.BaseModule):
     response_dict = _BatchJob_from_mldev(self._api_client, response_dict)

     return_value = types.BatchJob._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -1127,15 +1160,18 @@ class AsyncBatches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CancelBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
-
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs/{name}:cancel'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs/{name}:cancel'

     query_params = request_dict.get('_query')
     if query_params:
@@ -1143,7 +1179,7 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -1163,13 +1199,18 @@ class AsyncBatches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _ListBatchJobsParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs'

     query_params = request_dict.get('_query')
     if query_params:
@@ -1177,7 +1218,7 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -1200,7 +1241,7 @@ class AsyncBatches(_api_module.BaseModule):
     )

     return_value = types.ListBatchJobsResponse._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -1233,13 +1274,18 @@ class AsyncBatches(_api_module.BaseModule):
         config=config,
     )

+    request_url_dict: Optional[dict[str, str]]
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _DeleteBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
-
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batchPredictionJobs/{name}'

     query_params = request_dict.get('_query')
     if query_params:
@@ -1247,7 +1293,7 @@ class AsyncBatches(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)

-    http_options = None
+    http_options: Optional[types.HttpOptionsOrDict] = None
     if isinstance(config, dict):
       http_options = config.get('http_options', None)
     elif hasattr(config, 'http_options'):
@@ -1270,7 +1316,7 @@ class AsyncBatches(_api_module.BaseModule):
     )

     return_value = types.DeleteResourceJob._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
    )
     self._api_client._verify_response(return_value)
     return return_value
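
Two behavioral changes recur through the method bodies above: request paths are now built by substituting the converter's `_url` dict into the path template with `str.format_map` (falling back to the literal template when no URL parameters were produced), and `_from_response` now receives `parameter_model.model_dump()` — a plain dict — instead of the pydantic model object itself. A minimal self-contained sketch of both patterns, using a stand-in pydantic model rather than the SDK's generated parameter types:

```python
from typing import Any, Optional

from pydantic import BaseModel


class GetBatchJobParams(BaseModel):
    """Stand-in for the SDK's generated parameter models."""

    name: str


def build_request(params: GetBatchJobParams) -> tuple[str, dict[str, Any]]:
    # The converters stash URL parameters under '_url' in the request dict.
    request_dict: dict[str, Any] = {"_url": {"name": params.name}}

    request_url_dict: Optional[dict[str, str]] = request_dict.get("_url")
    if request_url_dict:
        # format_map fills {name} directly from the dict, no unpacking needed.
        path = "batchPredictionJobs/{name}".format_map(request_url_dict)
    else:
        path = "batchPredictionJobs/{name}"

    # 1.4.0 style: pass a plain dict (model_dump) instead of the model object.
    kwargs = params.model_dump()
    return path, kwargs


path, kwargs = build_request(GetBatchJobParams(name="123"))
print(path)    # batchPredictionJobs/123
print(kwargs)  # {'name': '123'}
```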