google-genai 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- google/genai/_api_client.py +80 -20
- google/genai/_common.py +14 -29
- google/genai/_replay_api_client.py +11 -46
- google/genai/_transformers.py +38 -0
- google/genai/batches.py +64 -62
- google/genai/caches.py +94 -80
- google/genai/chats.py +5 -8
- google/genai/files.py +69 -68
- google/genai/live.py +37 -29
- google/genai/models.py +277 -220
- google/genai/tunings.py +70 -68
- google/genai/types.py +17 -2
- google/genai/version.py +1 -1
- {google_genai-0.4.0.dist-info → google_genai-0.5.0.dist-info}/METADATA +1 -1
- google_genai-0.5.0.dist-info/RECORD +25 -0
- google_genai-0.4.0.dist-info/RECORD +0 -25
- {google_genai-0.4.0.dist-info → google_genai-0.5.0.dist-info}/LICENSE +0 -0
- {google_genai-0.4.0.dist-info → google_genai-0.5.0.dist-info}/WHEEL +0 -0
- {google_genai-0.4.0.dist-info → google_genai-0.5.0.dist-info}/top_level.txt +0 -0
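The version bump itself is the one-line change in google/genai/version.py. A quick way to confirm which side of this diff is installed — a minimal sketch, assuming the package re-exports the version string at the top level (a common convention, not shown in this diff):

from google import genai

# Expected to print '0.4.0' or '0.5.0' depending on the installed wheel.
print(genai.__version__)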
google/genai/batches.py
CHANGED
@@ -13,6 +13,8 @@
 # limitations under the License.
 #
 
+# Code generated by the Google Gen AI SDK generator DO NOT EDIT.
+
 from typing import Optional, Union
 from urllib.parse import urlencode
 from . import _common
@@ -714,11 +716,11 @@ class Batches(_common.BaseModule):
         config=config,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CreateBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -731,17 +733,17 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.
+    response_dict = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
-    if self.
-      response_dict = _BatchJob_from_vertex(self.
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   def get(
@@ -770,11 +772,11 @@ class Batches(_common.BaseModule):
         config=config,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _GetBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -787,17 +789,17 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.
+    response_dict = self._api_client.request(
         'get', path, request_dict, http_options
     )
 
-    if self.
-      response_dict = _BatchJob_from_vertex(self.
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   def cancel(
@@ -811,11 +813,11 @@ class Batches(_common.BaseModule):
         config=config,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CancelBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}:cancel'.format_map(
           request_dict.get('_url')
@@ -830,7 +832,7 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.
+    response_dict = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
@@ -841,11 +843,11 @@ class Batches(_common.BaseModule):
         config=config,
    )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _ListBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -858,23 +860,23 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.
+    response_dict = self._api_client.request(
         'get', path, request_dict, http_options
     )
 
-    if self.
+    if self._api_client.vertexai:
       response_dict = _ListBatchJobResponse_from_vertex(
-          self.
+          self._api_client, response_dict
       )
     else:
       response_dict = _ListBatchJobResponse_from_mldev(
-          self.
+          self._api_client, response_dict
      )
 
     return_value = types.ListBatchJobResponse._from_response(
         response_dict, parameter_model
     )
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   def delete(self, *, name: str) -> types.DeleteResourceJob:
@@ -899,11 +901,11 @@ class Batches(_common.BaseModule):
         name=name,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _DeleteBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -916,23 +918,23 @@ class Batches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = self.
+    response_dict = self._api_client.request(
         'delete', path, request_dict, http_options
     )
 
-    if self.
+    if self._api_client.vertexai:
       response_dict = _DeleteResourceJob_from_vertex(
-          self.
+          self._api_client, response_dict
      )
     else:
       response_dict = _DeleteResourceJob_from_mldev(
-          self.
+          self._api_client, response_dict
       )
 
     return_value = types.DeleteResourceJob._from_response(
         response_dict, parameter_model
     )
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   def create(
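The only complete signature visible in the synchronous hunks above is def delete(self, *, name: str) -> types.DeleteResourceJob, and every method guards on _api_client.vertexai. A minimal calling sketch against a Vertex AI client — assuming the module is exposed as client.batches (not shown in this diff); the project, location, and job resource name are placeholders:

from google import genai
from google.genai import types

# The guard clauses above raise ValueError on a non-Vertex client.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

# delete() is keyword-only and returns a types.DeleteResourceJob per the
# signature shown in the diff.
result: types.DeleteResourceJob = client.batches.delete(
    name='projects/my-project/locations/us-central1/batchPredictionJobs/123'
)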
@@ -1010,11 +1012,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CreateBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -1027,17 +1029,17 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.
+    response_dict = await self._api_client.async_request(
         'post', path, request_dict, http_options
     )
 
-    if self.
-      response_dict = _BatchJob_from_vertex(self.
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def get(
@@ -1066,11 +1068,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _GetBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -1083,17 +1085,17 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.
+    response_dict = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
-    if self.
-      response_dict = _BatchJob_from_vertex(self.
+    if self._api_client.vertexai:
+      response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
     else:
-      response_dict = _BatchJob_from_mldev(self.
+      response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
 
     return_value = types.BatchJob._from_response(response_dict, parameter_model)
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def cancel(
@@ -1107,11 +1109,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _CancelBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}:cancel'.format_map(
           request_dict.get('_url')
@@ -1126,7 +1128,7 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.
+    response_dict = await self._api_client.async_request(
         'post', path, request_dict, http_options
     )
 
@@ -1137,11 +1139,11 @@ class AsyncBatches(_common.BaseModule):
         config=config,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _ListBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs'.format_map(request_dict.get('_url'))
 
@@ -1154,23 +1156,23 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.
+    response_dict = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
-    if self.
+    if self._api_client.vertexai:
       response_dict = _ListBatchJobResponse_from_vertex(
-          self.
+          self._api_client, response_dict
       )
     else:
       response_dict = _ListBatchJobResponse_from_mldev(
-          self.
+          self._api_client, response_dict
      )
 
     return_value = types.ListBatchJobResponse._from_response(
         response_dict, parameter_model
     )
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def delete(self, *, name: str) -> types.DeleteResourceJob:
@@ -1195,11 +1197,11 @@ class AsyncBatches(_common.BaseModule):
         name=name,
     )
 
-    if not self.
+    if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
       request_dict = _DeleteBatchJobParameters_to_vertex(
-          self.
+          self._api_client, parameter_model
       )
       path = 'batchPredictionJobs/{name}'.format_map(request_dict.get('_url'))
 
@@ -1212,23 +1214,23 @@ class AsyncBatches(_common.BaseModule):
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.apply_base64_encoding(request_dict)
 
-    response_dict = await self.
+    response_dict = await self._api_client.async_request(
         'delete', path, request_dict, http_options
     )
 
-    if self.
+    if self._api_client.vertexai:
       response_dict = _DeleteResourceJob_from_vertex(
-          self.
+          self._api_client, response_dict
      )
     else:
       response_dict = _DeleteResourceJob_from_mldev(
-          self.
+          self._api_client, response_dict
      )
 
     return_value = types.DeleteResourceJob._from_response(
         response_dict, parameter_model
     )
-    self.
+    self._api_client._verify_response(return_value)
     return return_value
 
   async def create(