google-genai 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/__init__.py +2 -1
- google/genai/_api_client.py +161 -52
- google/genai/_automatic_function_calling_util.py +14 -14
- google/genai/_common.py +14 -29
- google/genai/_replay_api_client.py +13 -54
- google/genai/_transformers.py +38 -0
- google/genai/batches.py +80 -78
- google/genai/caches.py +112 -98
- google/genai/chats.py +7 -10
- google/genai/client.py +6 -3
- google/genai/files.py +91 -90
- google/genai/live.py +65 -34
- google/genai/models.py +374 -297
- google/genai/tunings.py +87 -85
- google/genai/types.py +167 -82
- google/genai/version.py +16 -0
- {google_genai-0.3.0.dist-info → google_genai-0.5.0.dist-info}/METADATA +57 -17
- google_genai-0.5.0.dist-info/RECORD +25 -0
- {google_genai-0.3.0.dist-info → google_genai-0.5.0.dist-info}/WHEEL +1 -1
- google_genai-0.3.0.dist-info/RECORD +0 -24
- {google_genai-0.3.0.dist-info → google_genai-0.5.0.dist-info}/LICENSE +0 -0
- {google_genai-0.3.0.dist-info → google_genai-0.5.0.dist-info}/top_level.txt +0 -0
google/genai/_replay_api_client.py CHANGED
```diff
@@ -15,6 +15,7 @@
 
 """Replay API client."""
 
+import base64
 import copy
 import inspect
 import json
```
```diff
@@ -24,7 +25,6 @@ import datetime
 from typing import Any, Literal, Optional, Union
 
 import google.auth
-from pydantic import BaseModel
 from requests.exceptions import HTTPError
 
 from . import errors
```
```diff
@@ -33,6 +33,7 @@ from ._api_client import HttpOptions
 from ._api_client import HttpRequest
 from ._api_client import HttpResponse
 from ._api_client import RequestJsonEncoder
+from ._common import BaseModel
 
 def _redact_version_numbers(version_string: str) -> str:
   """Redacts version numbers in the form x.y.z from a string."""
```
```diff
@@ -71,6 +72,11 @@ def _redact_request_url(url: str) -> str:
       '{VERTEX_URL_PREFIX}/',
       url,
   )
+  result = re.sub(
+      r'.*-aiplatform.googleapis.com/[^/]+/',
+      '{VERTEX_URL_PREFIX}/',
+      result,
+  )
   result = re.sub(
       r'https://generativelanguage.googleapis.com/[^/]+',
       '{MLDEV_URL_PREFIX}',
```
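The added `re.sub` extends URL redaction to regional Vertex endpoints (e.g. `us-central1-aiplatform.googleapis.com`). A standalone sketch of the effect; the pattern is copied from the diff, while the helper name and sample URL are illustrative:

```python
import re

def redact_vertex_url(url: str) -> str:
    # Collapse any "<region>-aiplatform.googleapis.com/<version>/" prefix
    # into a stable placeholder so replay files are region-independent.
    return re.sub(
        r'.*-aiplatform.googleapis.com/[^/]+/',
        '{VERTEX_URL_PREFIX}/',
        url,
    )

url = ('https://us-central1-aiplatform.googleapis.com/v1beta1/'
       'projects/p/locations/us-central1/publishers/google/models/some-model:predict')
print(redact_vertex_url(url))
# {VERTEX_URL_PREFIX}/projects/p/locations/us-central1/publishers/google/models/some-model:predict
```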
```diff
@@ -105,28 +111,6 @@ def redact_http_request(http_request: HttpRequest):
   _redact_request_body(http_request.data)
 
 
-def process_bytes_fields(data: dict[str, object]):
-  """Converts bytes fields to strings.
-
-  This function doesn't modify the content of data dict.
-  """
-  if not isinstance(data, dict):
-    return data
-  for key, value in data.items():
-    if isinstance(value, bytes):
-      data[key] = value.decode()
-    elif isinstance(value, dict):
-      process_bytes_fields(value)
-    elif isinstance(value, list):
-      if all(isinstance(v, bytes) for v in value):
-        data[key] = [v.decode() for v in value]
-      else:
-        data[key] = [process_bytes_fields(v) for v in value]
-    else:
-      data[key] = value
-  return data
-
-
 def _current_file_path_and_line():
   """Prints the current file path and line number."""
   frame = inspect.currentframe().f_back.f_back
```
```diff
@@ -185,7 +169,7 @@ class ReplayFile(BaseModel):
 
 
 class ReplayApiClient(ApiClient):
-  """For integration testing, send recorded
+  """For integration testing, send recorded response or records a response."""
 
   def __init__(
       self,
```
```diff
@@ -282,7 +266,7 @@ class ReplayApiClient(ApiClient):
     with open(replay_file_path, 'w') as f:
       f.write(
           json.dumps(
-              self.replay_session.model_dump(), indent=2, cls=ResponseJsonEncoder
+              self.replay_session.model_dump(mode='json'), indent=2, cls=ResponseJsonEncoder
          )
       )
     self.replay_session = None
```
```diff
@@ -383,15 +367,8 @@
     if isinstance(response_model, list):
       response_model = response_model[0]
     print('response_model: ', response_model.model_dump(exclude_none=True))
-    actual = json.dumps(
-        response_model.model_dump(exclude_none=True),
-        cls=ResponseJsonEncoder,
-        sort_keys=True,
-    )
-    expected = json.dumps(
-        interaction.response.sdk_response_segments[self._sdk_response_index],
-        sort_keys=True,
-    )
+    actual = response_model.model_dump(exclude_none=True, mode='json')
+    expected = interaction.response.sdk_response_segments[self._sdk_response_index]
     assert (
         actual == expected
     ), f'SDK response mismatch:\nActual: {actual}\nExpected: {expected}'
```
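The replacement works because pydantic v2's `model_dump(mode='json')` already renders bytes and datetime fields as JSON-safe strings, so the dumped dict can be compared directly against the recorded segment without a round-trip through `json.dumps`. A minimal standalone illustration (the model is hypothetical, not an SDK type):

```python
import datetime
from pydantic import BaseModel

class Segment(BaseModel):
    data: bytes
    created: datetime.datetime

s = Segment(
    data=b'abc',
    created=datetime.datetime(2024, 11, 15, tzinfo=datetime.timezone.utc),
)
print(s.model_dump())             # {'data': b'abc', 'created': datetime.datetime(2024, 11, 15, ...)}
print(s.model_dump(mode='json'))  # {'data': 'abc', 'created': '2024-11-15T00:00:00Z'}
```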
```diff
@@ -444,30 +421,12 @@
     return self._build_response_from_replay(request).text
 
 
+# TODO(b/389693448): Cleanup datetime hacks.
 class ResponseJsonEncoder(json.JSONEncoder):
   """The replay test json encoder for response.
-
-  We need RequestJsonEncoder and ResponseJsonEncoder because:
-  1. In production, we only need RequestJsonEncoder to help json module
-  to convert non-stringable and stringable types to json string. Especially
-  for bytes type, the value of bytes field is encoded to base64 string so it
-  is always stringable and the RequestJsonEncoder doesn't have to deal with
-  utf-8 JSON broken issue.
-  2. In replay test, we also need ResponseJsonEncoder to help json module
-  convert non-stringable and stringable types to json string. But response
-  object returned from SDK method is different from the request api_client
-  sent to server. For the bytes type, there is no base64 string in response
-  anymore, because SDK handles it internally. So bytes type in Response is
-  non-stringable. The ResponseJsonEncoder uses different encoding
-  strategy than the RequestJsonEncoder to deal with utf-8 JSON broken issue.
   """
   def default(self, o):
-    if isinstance(o, bytes):
-      # use error replace because response need to be serialized with bytes
-      # string, not base64 string. Otherwise, we cannot tell the response is
-      # already decoded from base64 or not from the replay file.
-      return o.decode(encoding='utf-8', errors='replace')
-    elif isinstance(o, datetime.datetime):
+    if isinstance(o, datetime.datetime):
       # dt.isoformat() prints "2024-11-15T23:27:45.624657+00:00"
       # but replay files want "2024-11-15T23:27:45.624657Z"
       if o.isoformat().endswith('+00:00'):
```
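With the bytes branch removed, the encoder's remaining job is normalizing UTC datetimes to the trailing-`Z` form replay files expect. A self-contained sketch of that behavior; the class name and the fallthrough to `super().default` are assumptions about code not shown in the hunk:

```python
import datetime
import json

class ZuluDatetimeEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, datetime.datetime):
            s = o.isoformat()
            # isoformat() yields "...+00:00" for UTC; replay files store "...Z".
            if s.endswith('+00:00'):
                return s[: -len('+00:00')] + 'Z'
            return s
        return super().default(o)

ts = datetime.datetime(2024, 11, 15, 23, 27, 45, 624657, tzinfo=datetime.timezone.utc)
print(json.dumps({'t': ts}, cls=ZuluDatetimeEncoder))
# {"t": "2024-11-15T23:27:45.624657Z"}
```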
google/genai/_transformers.py CHANGED
```diff
@@ -142,6 +142,30 @@ def t_model(client: _api_client.ApiClient, model: str):
   else:
     return f'models/{model}'
 
+def t_models_url(api_client: _api_client.ApiClient, base_models: bool) -> str:
+  if api_client.vertexai:
+    if base_models:
+      return 'publishers/google/models'
+    else:
+      return 'models'
+  else:
+    if base_models:
+      return 'models'
+    else:
+      return 'tunedModels'
+
+
+def t_extract_models(api_client: _api_client.ApiClient, response: dict) -> list[types.Model]:
+  if response.get('models') is not None:
+    return response.get('models')
+  elif response.get('tunedModels') is not None:
+    return response.get('tunedModels')
+  elif response.get('publisherModels') is not None:
+    return response.get('publisherModels')
+  else:
+    raise ValueError('Cannot determine the models type.')
+
+
 def t_caches_model(api_client: _api_client.ApiClient, model: str):
   model = t_model(api_client, model)
   if not model:
```
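Together, the two new helpers pick the listing URL per API surface and normalize whichever response key the backend used. A hedged sketch of how a caller might combine them; the wrapper function is illustrative, only `t_models_url` and `t_extract_models` come from the diff:

```python
# Outcomes of t_models_url, from the branches above:
#   vertexai=True,  base_models=True  -> 'publishers/google/models'
#   vertexai=True,  base_models=False -> 'models'
#   vertexai=False, base_models=True  -> 'models'
#   vertexai=False, base_models=False -> 'tunedModels'
def list_models_request(api_client, base_models: bool, raw_response: dict):
    path = t_models_url(api_client, base_models)
    # t_extract_models tolerates any of the three response keys:
    # 'models', 'tunedModels', or 'publisherModels'.
    models = t_extract_models(api_client, raw_response)
    return path, models
```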
```diff
@@ -452,3 +476,17 @@ def t_tuning_job_status(
     return 'JOB_STATE_FAILED'
   else:
     return status
+
+
+# Some fields don't accept url safe base64 encoding.
+# We shouldn't use this transformer if the backend adhere to Cloud Type
+# format https://cloud.google.com/docs/discovery/type-format.
+# TODO(b/389133914): Remove the hack after Vertex backend fix the issue.
+def t_bytes(api_client: _api_client.ApiClient, data: bytes) -> str:
+  if not isinstance(data, bytes):
+    return data
+  if api_client.vertexai:
+    return base64.b64encode(data).decode('ascii')
+  else:
+    return base64.urlsafe_b64encode(data).decode('ascii')
+
```
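The Vertex/Gemini split exists because the standard and URL-safe base64 alphabets differ in two characters (`+/` versus `-_`), so a backend that validates one form rejects the other. A standalone illustration:

```python
import base64

raw = bytes([251, 239])  # bit pattern that lands on the differing alphabet slots
print(base64.b64encode(raw))          # b'++8='  (standard; Vertex branch)
print(base64.urlsafe_b64encode(raw))  # b'--8='  (URL-safe; Gemini API branch)
```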
google/genai/batches.py CHANGED
```diff
@@ -13,6 +13,8 @@
 # limitations under the License.
 #
 
+# Code generated by the Google Gen AI SDK generator DO NOT EDIT.
+
 from typing import Optional, Union
 from urllib.parse import urlencode
 from . import _common
```
```diff
@@ -31,13 +33,13 @@ def _BatchJobSource_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['format']):
+  if getv(from_object, ['format']) is not None:
     raise ValueError('format parameter is not supported in Google AI.')
 
-  if getv(from_object, ['gcs_uri']):
+  if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Google AI.')
 
-  if getv(from_object, ['bigquery_uri']):
+  if getv(from_object, ['bigquery_uri']) is not None:
     raise ValueError('bigquery_uri parameter is not supported in Google AI.')
 
   return to_object
```
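This `is not None` pattern, repeated through the rest of the file, is behavioral rather than cosmetic: a falsy-but-present value such as `''` previously slipped past the guard. A minimal illustration of the old versus new semantics; the helper names are illustrative:

```python
def old_guard(value) -> bool:
    return bool(value)         # '' / 0 / {} and None all pass silently

def new_guard(value) -> bool:
    return value is not None   # only a truly absent field passes

print(old_guard(''), new_guard(''))      # False True  -> '' now raises the ValueError
print(old_guard(None), new_guard(None))  # False False -> absent fields still fine
```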
```diff
@@ -71,13 +73,13 @@ def _BatchJobDestination_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['format']):
+  if getv(from_object, ['format']) is not None:
     raise ValueError('format parameter is not supported in Google AI.')
 
-  if getv(from_object, ['gcs_uri']):
+  if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Google AI.')
 
-  if getv(from_object, ['bigquery_uri']):
+  if getv(from_object, ['bigquery_uri']) is not None:
     raise ValueError('bigquery_uri parameter is not supported in Google AI.')
 
   return to_object
@@ -121,7 +123,7 @@ def _CreateBatchJobConfig_to_mldev(
   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
 
-  if getv(from_object, ['dest']):
+  if getv(from_object, ['dest']) is not None:
     raise ValueError('dest parameter is not supported in Google AI.')
 
   return to_object
@@ -159,10 +161,10 @@ def _CreateBatchJobParameters_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['model']):
+  if getv(from_object, ['model']) is not None:
     raise ValueError('model parameter is not supported in Google AI.')
 
-  if getv(from_object, ['src']):
+  if getv(from_object, ['src']) is not None:
     raise ValueError('src parameter is not supported in Google AI.')
 
   if getv(from_object, ['config']) is not None:
@@ -243,7 +245,7 @@ def _GetBatchJobParameters_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['name']):
+  if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Google AI.')
 
   if getv(from_object, ['config']) is not None:
@@ -313,7 +315,7 @@ def _CancelBatchJobParameters_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['name']):
+  if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Google AI.')
 
   if getv(from_object, ['config']) is not None:
@@ -374,7 +376,7 @@ def _ListBatchJobConfig_to_mldev(
       getv(from_object, ['page_token']),
   )
 
-  if getv(from_object, ['filter']):
+  if getv(from_object, ['filter']) is not None:
     raise ValueError('filter parameter is not supported in Google AI.')
 
   return to_object
@@ -413,7 +415,7 @@ def _ListBatchJobParameters_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['config']):
+  if getv(from_object, ['config']) is not None:
     raise ValueError('config parameter is not supported in Google AI.')
 
   return to_object
@@ -443,7 +445,7 @@ def _DeleteBatchJobParameters_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['name']):
+  if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Google AI.')
 
   return to_object
```
```diff
@@ -714,11 +716,11 @@ class Batches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CreateBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -731,17 +733,17 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.api_client.request(
+    response_dict = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   def get(
```
```diff
@@ -770,11 +772,11 @@ class Batches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _GetBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -787,17 +789,17 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.api_client.request(
+    response_dict = self._api_client.request(
         'get', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   def cancel(
```
```diff
@@ -811,11 +813,11 @@ class Batches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CancelBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}:cancel'.format_map(
           request_dict.get('_url')
@@ -830,7 +832,7 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.api_client.request(
+    response_dict = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
```
```diff
@@ -841,11 +843,11 @@ class Batches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _ListBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -858,23 +860,23 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.api_client.request(
+    response_dict = self._api_client.request(
         'get', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
+    if self._api_client.vertexai:
       response_dict = _ListBatchJobResponse_from_vertex(
-          self.api_client, response_dict
+          self._api_client, response_dict
       )
     else:
       response_dict = _ListBatchJobResponse_from_mldev(
-          self.api_client, response_dict
+          self._api_client, response_dict
       )
 
     return_value = types.ListBatchJobResponse._from_response(
         response_dict, parameter_model
     )
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   def delete(self, *, name: str) -> types.DeleteResourceJob:
```
```diff
@@ -899,11 +901,11 @@ class Batches(_common.BaseModule):
         name=name,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _DeleteBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -916,23 +918,23 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.api_client.request(
+    response_dict = self._api_client.request(
         'delete', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
+    if self._api_client.vertexai:
       response_dict = _DeleteResourceJob_from_vertex(
-          self.api_client, response_dict
+          self._api_client, response_dict
      )
     else:
       response_dict = _DeleteResourceJob_from_mldev(
-          self.api_client, response_dict
+          self._api_client, response_dict
       )
 
     return_value = types.DeleteResourceJob._from_response(
         response_dict, parameter_model
     )
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   def create(
```
```diff
@@ -947,7 +949,7 @@ class Batches(_common.BaseModule):
     Args:
       model (str): The model to use for the batch job.
       src (str): The source of the batch job. Currently supports GCS URI(-s) or
-
+        BigQuery URI. Example: "gs://path/to/input/data" or
         "bq://projectId.bqDatasetId.bqTableId".
       config (CreateBatchJobConfig): Optional configuration for the batch job.
 
```
```diff
@@ -1010,11 +1012,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CreateBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -1027,17 +1029,17 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.api_client.async_request(
+    response_dict = await self._api_client.async_request(
         'post', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def get(
```
```diff
@@ -1066,11 +1068,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _GetBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -1083,17 +1085,17 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.api_client.async_request(
+    response_dict = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
-      response_dict = _BatchJob_from_vertex(self.api_client, response_dict)
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.api_client, response_dict)
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def cancel(
```
```diff
@@ -1107,11 +1109,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CancelBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}:cancel'.format_map(
           request_dict.get('_url')
@@ -1126,7 +1128,7 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.api_client.async_request(
+    response_dict = await self._api_client.async_request(
         'post', path, request_dict, http_options
     )
 
```
```diff
@@ -1137,11 +1139,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _ListBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -1154,23 +1156,23 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.api_client.async_request(
+    response_dict = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
+    if self._api_client.vertexai:
       response_dict = _ListBatchJobResponse_from_vertex(
-          self.api_client, response_dict
+          self._api_client, response_dict
       )
     else:
       response_dict = _ListBatchJobResponse_from_mldev(
-          self.api_client, response_dict
+          self._api_client, response_dict
       )
 
     return_value = types.ListBatchJobResponse._from_response(
         response_dict, parameter_model
     )
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def delete(self, *, name: str) -> types.DeleteResourceJob:
```
```diff
@@ -1195,11 +1197,11 @@ class AsyncBatches(_common.BaseModule):
         name=name,
     )
 
-    if not self.api_client.vertexai:
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _DeleteBatchJobParameters_to_vertex(
-          self.api_client, parameter_model
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -1212,23 +1214,23 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.api_client.async_request(
+    response_dict = await self._api_client.async_request(
         'delete', path, request_dict, http_options
     )
 
-    if self.api_client.vertexai:
+    if self._api_client.vertexai:
      response_dict = _DeleteResourceJob_from_vertex(
-          self.api_client, response_dict
+          self._api_client, response_dict
       )
     else:
       response_dict = _DeleteResourceJob_from_mldev(
-          self.api_client, response_dict
+          self._api_client, response_dict
       )
 
     return_value = types.DeleteResourceJob._from_response(
         response_dict, parameter_model
     )
-    self.api_client._verify_response(return_value)
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def create(
```
```diff
@@ -1243,7 +1245,7 @@ class AsyncBatches(_common.BaseModule):
     Args:
      model (str): The model to use for the batch job.
      src (str): The source of the batch job. Currently supports GCS URI(-s) or
-
+        BigQuery URI. Example: "gs://path/to/input/data" or
         "bq://projectId.bqDatasetId.bqTableId".
      config (CreateBatchJobConfig): Optional configuration for the batch job.
 
```
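For orientation, a hedged usage sketch of the surface these hunks modify. Batch jobs remain Vertex-only in 0.5.0, and the project, location, model, and source URI below are placeholders:

```python
from google import genai

# Batches require the Vertex AI client; the Gemini Developer API path raises.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

job = client.batches.create(
    model='gemini-1.5-flash-002',
    src='bq://my-project.my_dataset.my_table',  # or 'gs://path/to/input/data'
)
print(job.name, job.state)
```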