google-genai 0.0.1__py3-none-any.whl

This diff shows the content of a publicly released package version as published to one of the supported registries. It is provided for informational purposes only and reflects the package exactly as it appears in its public registry.
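The module below is generated converter code: each _*_to_vertex / _*_from_vertex function copies fields between the SDK's snake_case dicts and the Vertex AI wire format using path-based helpers, while the _to_mldev variants reject fields the Gemini Developer API does not support. As a hedged sketch of the path convention those helpers appear to follow (the real get_value_by_path / set_value_by_path implementations live in _common.py, which is not part of this excerpt, and may also handle non-dict objects):

# Hedged sketch of the path helpers assumed by the converters below;
# the actual google.genai._common implementations may differ.
def getv(obj, path):
  # Walk nested dict keys; return None as soon as a segment is missing.
  for key in path:
    if not isinstance(obj, dict) or key not in obj:
      return None
    obj = obj[key]
  return obj


def setv(obj, path, value):
  # Create intermediate dicts as needed, then assign the leaf value.
  for key in path[:-1]:
    obj = obj.setdefault(key, {})
  obj[path[-1]] = value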
@@ -0,0 +1,1041 @@
+ # Copyright 2024 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ from typing import Optional, Union
+ from urllib.parse import urlencode
+ from . import _common
+ from . import _extra_utils
+ from . import _transformers as t
+ from . import types
+ from ._api_client import ApiClient
+ from ._common import get_value_by_path as getv
+ from ._common import set_value_by_path as setv
+ from .pagers import AsyncPager, Pager
+
+
+ def _BatchJobSource_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['format']):
+     raise ValueError('format parameter is not supported in Google AI.')
+
+   if getv(from_object, ['gcs_uri']):
+     raise ValueError('gcs_uri parameter is not supported in Google AI.')
+
+   if getv(from_object, ['bigquery_uri']):
+     raise ValueError('bigquery_uri parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _BatchJobSource_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['format']) is not None:
+     setv(to_object, ['instancesFormat'], getv(from_object, ['format']))
+
+   if getv(from_object, ['gcs_uri']) is not None:
+     setv(to_object, ['gcsSource', 'uris'], getv(from_object, ['gcs_uri']))
+
+   if getv(from_object, ['bigquery_uri']) is not None:
+     setv(
+         to_object,
+         ['bigquerySource', 'inputUri'],
+         getv(from_object, ['bigquery_uri']),
+     )
+
+   return to_object
+
+
+ def _BatchJobDestination_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['format']):
+     raise ValueError('format parameter is not supported in Google AI.')
+
+   if getv(from_object, ['gcs_uri']):
+     raise ValueError('gcs_uri parameter is not supported in Google AI.')
+
+   if getv(from_object, ['bigquery_uri']):
+     raise ValueError('bigquery_uri parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _BatchJobDestination_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['format']) is not None:
+     setv(to_object, ['predictionsFormat'], getv(from_object, ['format']))
+
+   if getv(from_object, ['gcs_uri']) is not None:
+     setv(
+         to_object,
+         ['gcsDestination', 'outputUriPrefix'],
+         getv(from_object, ['gcs_uri']),
+     )
+
+   if getv(from_object, ['bigquery_uri']) is not None:
+     setv(
+         to_object,
+         ['bigqueryDestination', 'outputUri'],
+         getv(from_object, ['bigquery_uri']),
+     )
+
+   return to_object
+
+
+ def _CreateBatchJobConfig_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['http_options']) is not None:
+     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+   if getv(from_object, ['display_name']) is not None:
+     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
+
+   if getv(from_object, ['dest']):
+     raise ValueError('dest parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _CreateBatchJobConfig_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['http_options']) is not None:
+     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+   if getv(from_object, ['display_name']) is not None:
+     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
+
+   if getv(from_object, ['dest']) is not None:
+     setv(
+         parent_object,
+         ['outputConfig'],
+         _BatchJobDestination_to_vertex(
+             api_client,
+             t.t_batch_job_destination(api_client, getv(from_object, ['dest'])),
+             to_object,
+         ),
+     )
+
+   return to_object
+
+
+ def _CreateBatchJobParameters_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['model']):
+     raise ValueError('model parameter is not supported in Google AI.')
+
+   if getv(from_object, ['src']):
+     raise ValueError('src parameter is not supported in Google AI.')
+
+   if getv(from_object, ['config']) is not None:
+     setv(
+         to_object,
+         ['config'],
+         _CreateBatchJobConfig_to_mldev(
+             api_client, getv(from_object, ['config']), to_object
+         ),
+     )
+
+   return to_object
+
+
+ def _CreateBatchJobParameters_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['model']) is not None:
+     setv(
+         to_object,
+         ['model'],
+         t.t_model(api_client, getv(from_object, ['model'])),
+     )
+
+   if getv(from_object, ['src']) is not None:
+     setv(
+         to_object,
+         ['inputConfig'],
+         _BatchJobSource_to_vertex(
+             api_client,
+             t.t_batch_job_source(api_client, getv(from_object, ['src'])),
+             to_object,
+         ),
+     )
+
+   if getv(from_object, ['config']) is not None:
+     setv(
+         to_object,
+         ['config'],
+         _CreateBatchJobConfig_to_vertex(
+             api_client, getv(from_object, ['config']), to_object
+         ),
+     )
+
+   return to_object
+
+
+ def _GetBatchJobParameters_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']):
+     raise ValueError('name parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _GetBatchJobParameters_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']) is not None:
+     setv(
+         to_object,
+         ['_url', 'name'],
+         t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+     )
+
+   return to_object
+
+
+ def _CancelBatchJobParameters_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']):
+     raise ValueError('name parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _CancelBatchJobParameters_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']) is not None:
+     setv(
+         to_object,
+         ['_url', 'name'],
+         t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+     )
+
+   return to_object
+
+
+ def _ListBatchJobConfig_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['http_options']) is not None:
+     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+   if getv(from_object, ['page_size']) is not None:
+     setv(
+         parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
+     )
+
+   if getv(from_object, ['page_token']) is not None:
+     setv(
+         parent_object,
+         ['_query', 'pageToken'],
+         getv(from_object, ['page_token']),
+     )
+
+   if getv(from_object, ['filter']):
+     raise ValueError('filter parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _ListBatchJobConfig_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['http_options']) is not None:
+     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+   if getv(from_object, ['page_size']) is not None:
+     setv(
+         parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
+     )
+
+   if getv(from_object, ['page_token']) is not None:
+     setv(
+         parent_object,
+         ['_query', 'pageToken'],
+         getv(from_object, ['page_token']),
+     )
+
+   if getv(from_object, ['filter']) is not None:
+     setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter']))
+
+   return to_object
+
+
+ def _ListBatchJobParameters_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['config']):
+     raise ValueError('config parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _ListBatchJobParameters_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['config']) is not None:
+     setv(
+         to_object,
+         ['config'],
+         _ListBatchJobConfig_to_vertex(
+             api_client, getv(from_object, ['config']), to_object
+         ),
+     )
+
+   return to_object
+
+
+ def _DeleteBatchJobParameters_to_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']):
+     raise ValueError('name parameter is not supported in Google AI.')
+
+   return to_object
+
+
+ def _DeleteBatchJobParameters_to_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']) is not None:
+     setv(
+         to_object,
+         ['_url', 'name'],
+         t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+     )
+
+   return to_object
+
+
+ def _JobError_from_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+
+   return to_object
+
+
+ def _JobError_from_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['details']) is not None:
+     setv(to_object, ['details'], getv(from_object, ['details']))
+
+   if getv(from_object, ['code']) is not None:
+     setv(to_object, ['code'], getv(from_object, ['code']))
+
+   if getv(from_object, ['message']) is not None:
+     setv(to_object, ['message'], getv(from_object, ['message']))
+
+   return to_object
+
+
+ def _BatchJobSource_from_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+
+   return to_object
+
+
+ def _BatchJobSource_from_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['instancesFormat']) is not None:
+     setv(to_object, ['format'], getv(from_object, ['instancesFormat']))
+
+   if getv(from_object, ['gcsSource', 'uris']) is not None:
+     setv(to_object, ['gcs_uri'], getv(from_object, ['gcsSource', 'uris']))
+
+   if getv(from_object, ['bigquerySource', 'inputUri']) is not None:
+     setv(
+         to_object,
+         ['bigquery_uri'],
+         getv(from_object, ['bigquerySource', 'inputUri']),
+     )
+
+   return to_object
+
+
+ def _BatchJobDestination_from_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+
+   return to_object
+
+
+ def _BatchJobDestination_from_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['predictionsFormat']) is not None:
+     setv(to_object, ['format'], getv(from_object, ['predictionsFormat']))
+
+   if getv(from_object, ['gcsDestination', 'outputUriPrefix']) is not None:
+     setv(
+         to_object,
+         ['gcs_uri'],
+         getv(from_object, ['gcsDestination', 'outputUriPrefix']),
+     )
+
+   if getv(from_object, ['bigqueryDestination', 'outputUri']) is not None:
+     setv(
+         to_object,
+         ['bigquery_uri'],
+         getv(from_object, ['bigqueryDestination', 'outputUri']),
+     )
+
+   return to_object
+
+
+ def _BatchJob_from_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+
+   return to_object
+
+
+ def _BatchJob_from_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']) is not None:
+     setv(to_object, ['name'], getv(from_object, ['name']))
+
+   if getv(from_object, ['displayName']) is not None:
+     setv(to_object, ['display_name'], getv(from_object, ['displayName']))
+
+   if getv(from_object, ['state']) is not None:
+     setv(to_object, ['state'], getv(from_object, ['state']))
+
+   if getv(from_object, ['error']) is not None:
+     setv(
+         to_object,
+         ['error'],
+         _JobError_from_vertex(
+             api_client, getv(from_object, ['error']), to_object
+         ),
+     )
+
+   if getv(from_object, ['createTime']) is not None:
+     setv(to_object, ['create_time'], getv(from_object, ['createTime']))
+
+   if getv(from_object, ['startTime']) is not None:
+     setv(to_object, ['start_time'], getv(from_object, ['startTime']))
+
+   if getv(from_object, ['endTime']) is not None:
+     setv(to_object, ['end_time'], getv(from_object, ['endTime']))
+
+   if getv(from_object, ['updateTime']) is not None:
+     setv(to_object, ['update_time'], getv(from_object, ['updateTime']))
+
+   if getv(from_object, ['model']) is not None:
+     setv(to_object, ['model'], getv(from_object, ['model']))
+
+   if getv(from_object, ['inputConfig']) is not None:
+     setv(
+         to_object,
+         ['src'],
+         _BatchJobSource_from_vertex(
+             api_client, getv(from_object, ['inputConfig']), to_object
+         ),
+     )
+
+   if getv(from_object, ['outputConfig']) is not None:
+     setv(
+         to_object,
+         ['dest'],
+         _BatchJobDestination_from_vertex(
+             api_client, getv(from_object, ['outputConfig']), to_object
+         ),
+     )
+
+   return to_object
+
+
+ def _ListBatchJobResponse_from_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['nextPageToken']) is not None:
+     setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
+
+   return to_object
+
+
+ def _ListBatchJobResponse_from_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['nextPageToken']) is not None:
+     setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
+
+   if getv(from_object, ['batchPredictionJobs']) is not None:
+     setv(
+         to_object,
+         ['batch_jobs'],
+         [
+             _BatchJob_from_vertex(api_client, item, to_object)
+             for item in getv(from_object, ['batchPredictionJobs'])
+         ],
+     )
+
+   return to_object
+
+
+ def _DeleteResourceJob_from_mldev(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+
+   return to_object
+
+
+ def _DeleteResourceJob_from_vertex(
+     api_client: ApiClient,
+     from_object: Union[dict, object],
+     parent_object: Optional[dict] = None,
+ ) -> dict:
+   to_object = {}
+   if getv(from_object, ['name']) is not None:
+     setv(to_object, ['name'], getv(from_object, ['name']))
+
+   if getv(from_object, ['done']) is not None:
+     setv(to_object, ['done'], getv(from_object, ['done']))
+
+   if getv(from_object, ['error']) is not None:
+     setv(
+         to_object,
+         ['error'],
+         _JobError_from_vertex(
+             api_client, getv(from_object, ['error']), to_object
+         ),
+     )
+
+   return to_object
+
+
+ class Batches(_common.BaseModule):
+
+   def _create(
+       self,
+       *,
+       model: str,
+       src: str,
+       config: Optional[types.CreateBatchJobConfigOrDict] = None,
+   ) -> types.BatchJob:
+     parameter_model = types._CreateBatchJobParameters(
+         model=model,
+         src=src,
+         config=config,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _CreateBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = self.api_client.request(
+         'post', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+     else:
+       response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+
+     return_value = types.BatchJob._from_response(response_dict, parameter_model)
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   def get(self, *, name: str) -> types.BatchJob:
+     parameter_model = types._GetBatchJobParameters(
+         name=name,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _GetBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = self.api_client.request(
+         'get', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+     else:
+       response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+
+     return_value = types.BatchJob._from_response(response_dict, parameter_model)
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   def cancel(self, *, name: str) -> None:
+     parameter_model = types._CancelBatchJobParameters(
+         name=name,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _CancelBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs/{name}:cancel'.format_map(
+           request_dict.get('_url')
+       )
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = self.api_client.request(
+         'post', path, request_dict, http_options
+     )
+
+   def _list(
+       self, *, config: types.ListBatchJobConfigOrDict
+   ) -> types.ListBatchJobResponse:
+     parameter_model = types._ListBatchJobParameters(
+         config=config,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _ListBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = self.api_client.request(
+         'get', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _ListBatchJobResponse_from_vertex(
+           self.api_client, response_dict
+       )
+     else:
+       response_dict = _ListBatchJobResponse_from_mldev(
+           self.api_client, response_dict
+       )
+
+     return_value = types.ListBatchJobResponse._from_response(
+         response_dict, parameter_model
+     )
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   def delete(self, *, name: str) -> types.DeleteResourceJob:
+     parameter_model = types._DeleteBatchJobParameters(
+         name=name,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _DeleteBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = self.api_client.request(
+         'delete', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _DeleteResourceJob_from_vertex(
+           self.api_client, response_dict
+       )
+     else:
+       response_dict = _DeleteResourceJob_from_mldev(
+           self.api_client, response_dict
+       )
+
+     return_value = types.DeleteResourceJob._from_response(
+         response_dict, parameter_model
+     )
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   def create(
+       self,
+       *,
+       model: str,
+       src: str,
+       config: Optional[types.CreateBatchJobConfigOrDict] = None,
+   ) -> types.BatchJob:
+     config = _extra_utils.format_destination(src, config)
+     return self._create(model=model, src=src, config=config)
+
+   def list(
+       self, *, config: Optional[types.ListBatchJobConfigOrDict] = None
+   ) -> Pager[types.BatchJob]:
+     return Pager(
+         'batch_jobs',
+         self._list,
+         self._list(config=config),
+         config,
+     )
+
+
+ class AsyncBatches(_common.BaseModule):
+
+   async def _create(
+       self,
+       *,
+       model: str,
+       src: str,
+       config: Optional[types.CreateBatchJobConfigOrDict] = None,
+   ) -> types.BatchJob:
+     parameter_model = types._CreateBatchJobParameters(
+         model=model,
+         src=src,
+         config=config,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _CreateBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = await self.api_client.async_request(
+         'post', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+     else:
+       response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+
+     return_value = types.BatchJob._from_response(response_dict, parameter_model)
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   async def get(self, *, name: str) -> types.BatchJob:
+     parameter_model = types._GetBatchJobParameters(
+         name=name,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _GetBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = await self.api_client.async_request(
+         'get', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+     else:
+       response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+
+     return_value = types.BatchJob._from_response(response_dict, parameter_model)
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   async def cancel(self, *, name: str) -> None:
+     parameter_model = types._CancelBatchJobParameters(
+         name=name,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _CancelBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs/{name}:cancel'.format_map(
+           request_dict.get('_url')
+       )
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = await self.api_client.async_request(
+         'post', path, request_dict, http_options
+     )
+
+   async def _list(
+       self, *, config: types.ListBatchJobConfigOrDict
+   ) -> types.ListBatchJobResponse:
+     parameter_model = types._ListBatchJobParameters(
+         config=config,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _ListBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = await self.api_client.async_request(
+         'get', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _ListBatchJobResponse_from_vertex(
+           self.api_client, response_dict
+       )
+     else:
+       response_dict = _ListBatchJobResponse_from_mldev(
+           self.api_client, response_dict
+       )
+
+     return_value = types.ListBatchJobResponse._from_response(
+         response_dict, parameter_model
+     )
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   async def delete(self, *, name: str) -> types.DeleteResourceJob:
+     parameter_model = types._DeleteBatchJobParameters(
+         name=name,
+     )
+
+     if not self.api_client.vertexai:
+       raise ValueError('This method is only supported in the Vertex AI client.')
+     else:
+       request_dict = _DeleteBatchJobParameters_to_vertex(
+           self.api_client, parameter_model
+       )
+       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
+
+     query_params = request_dict.get('_query')
+     if query_params:
+       path = f'{path}?{urlencode(query_params)}'
+     # TODO: remove the hack that pops config.
+     config = request_dict.pop('config', None)
+     http_options = config.pop('httpOptions', None) if config else None
+     request_dict = _common.convert_to_dict(request_dict)
+     request_dict = _common.apply_base64_encoding(request_dict)
+
+     response_dict = await self.api_client.async_request(
+         'delete', path, request_dict, http_options
+     )
+
+     if self.api_client.vertexai:
+       response_dict = _DeleteResourceJob_from_vertex(
+           self.api_client, response_dict
+       )
+     else:
+       response_dict = _DeleteResourceJob_from_mldev(
+           self.api_client, response_dict
+       )
+
+     return_value = types.DeleteResourceJob._from_response(
+         response_dict, parameter_model
+     )
+     self.api_client._verify_response(return_value)
+     return return_value
+
+   async def create(
+       self,
+       *,
+       model: str,
+       src: str,
+       config: Optional[types.CreateBatchJobConfigOrDict] = None,
+   ) -> types.BatchJob:
+     config = _extra_utils.format_destination(src, config)
+     return await self._create(model=model, src=src, config=config)
+
+   async def list(
+       self, *, config: Optional[types.ListBatchJobConfigOrDict] = None
+   ) -> AsyncPager[types.BatchJob]:
+     return AsyncPager(
+         'batch_jobs',
+         self._list,
+         await self._list(config=config),
+         config,
+     )
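
For orientation, a hedged usage sketch of the surface defined above. The genai.Client constructor and its arguments are assumptions from elsewhere in this wheel, and the project, model, and URI values are placeholders, not part of this file:

from google import genai

# Batches is Vertex-only in this release, so the client is assumed to be
# constructed in Vertex AI mode.
client = genai.Client(
    vertexai=True, project='my-project', location='us-central1'
)

# create() formats the destination from src via _extra_utils, then
# delegates to _create().
job = client.batches.create(
    model='gemini-1.5-flash-002',
    src='gs://my-bucket/requests.jsonl',
)

job = client.batches.get(name=job.name)  # poll the job state

# list() wraps _list() in a Pager keyed on the 'batch_jobs' field.
for j in client.batches.list(config={'page_size': 10}):
  print(j.name, j.state)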