google-genai 1.23.0__tar.gz → 1.24.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {google_genai-1.23.0/google_genai.egg-info → google_genai-1.24.0}/PKG-INFO +70 -5
  2. {google_genai-1.23.0 → google_genai-1.24.0}/README.md +69 -4
  3. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_api_client.py +32 -30
  4. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_live_converters.py +14 -6
  5. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_tokens_converters.py +6 -0
  6. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/batches.py +84 -12
  7. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/caches.py +6 -0
  8. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/models.py +6 -0
  9. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/tunings.py +12 -0
  10. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/types.py +295 -34
  11. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/version.py +1 -1
  12. {google_genai-1.23.0 → google_genai-1.24.0/google_genai.egg-info}/PKG-INFO +70 -5
  13. {google_genai-1.23.0 → google_genai-1.24.0}/pyproject.toml +1 -1
  14. {google_genai-1.23.0 → google_genai-1.24.0}/LICENSE +0 -0
  15. {google_genai-1.23.0 → google_genai-1.24.0}/MANIFEST.in +0 -0
  16. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/__init__.py +0 -0
  17. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_adapters.py +0 -0
  18. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_api_module.py +0 -0
  19. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_automatic_function_calling_util.py +0 -0
  20. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_base_url.py +0 -0
  21. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_common.py +0 -0
  22. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_extra_utils.py +0 -0
  23. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_mcp_utils.py +0 -0
  24. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_replay_api_client.py +0 -0
  25. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_test_api_client.py +0 -0
  26. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/_transformers.py +0 -0
  27. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/chats.py +0 -0
  28. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/client.py +0 -0
  29. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/errors.py +0 -0
  30. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/files.py +0 -0
  31. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/live.py +0 -0
  32. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/live_music.py +0 -0
  33. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/operations.py +0 -0
  34. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/pagers.py +0 -0
  35. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/py.typed +0 -0
  36. {google_genai-1.23.0 → google_genai-1.24.0}/google/genai/tokens.py +0 -0
  37. {google_genai-1.23.0 → google_genai-1.24.0}/google_genai.egg-info/SOURCES.txt +0 -0
  38. {google_genai-1.23.0 → google_genai-1.24.0}/google_genai.egg-info/dependency_links.txt +0 -0
  39. {google_genai-1.23.0 → google_genai-1.24.0}/google_genai.egg-info/requires.txt +0 -0
  40. {google_genai-1.23.0 → google_genai-1.24.0}/google_genai.egg-info/top_level.txt +0 -0
  41. {google_genai-1.23.0 → google_genai-1.24.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.23.0
+Version: 1.24.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -1132,9 +1132,9 @@ response3.generated_images[0].image.show()
 
 ### Veo
 
-#### Generate Videos
+Support for generating videos is considered public preview
 
-Support for generate videos in Vertex and Gemini Developer API is behind an allowlist
+#### Generate Videos (Text to Video)
 
 ```python
 from google.genai import types
@@ -1145,7 +1145,6 @@ operation = client.models.generate_videos(
     prompt='A neon hologram of a cat driving at top speed',
     config=types.GenerateVideosConfig(
         number_of_videos=1,
-        fps=24,
         duration_seconds=5,
         enhance_prompt=True,
     ),
@@ -1156,7 +1155,73 @@ while not operation.done:
     time.sleep(20)
     operation = client.operations.get(operation)
 
-video = operation.result.generated_videos[0].video
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Image to Video)
+
+```python
+from google.genai import types
+
+# Read local image (uses mimetypes.guess_type to infer mime type)
+image = types.Image.from_file("local/path/file.png")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if image is provided
+    prompt='Night sky',
+    image=image,
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+        # Can also pass an Image into last_frame for frame interpolation
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Video to Video)
+
+Currently, only Vertex supports Video to Video generation (Video extension).
+
+```python
+from google.genai import types
+
+# Read local video (uses mimetypes.guess_type to infer mime type)
+video = types.Video.from_file("local/path/video.mp4")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if Video is provided
+    prompt='Night sky',
+    # Input video must be in GCS
+    video=types.Video(
+        uri="gs://bucket-name/inputs/videos/cat_driving.mp4",
+    ),
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
 video.show()
 ```
 
@@ -1098,9 +1098,9 @@ response3.generated_images[0].image.show()
 
 ### Veo
 
-#### Generate Videos
+Support for generating videos is considered public preview
 
-Support for generate videos in Vertex and Gemini Developer API is behind an allowlist
+#### Generate Videos (Text to Video)
 
 ```python
 from google.genai import types
@@ -1111,7 +1111,6 @@ operation = client.models.generate_videos(
     prompt='A neon hologram of a cat driving at top speed',
     config=types.GenerateVideosConfig(
         number_of_videos=1,
-        fps=24,
         duration_seconds=5,
         enhance_prompt=True,
     ),
@@ -1122,7 +1121,73 @@ while not operation.done:
     time.sleep(20)
     operation = client.operations.get(operation)
 
-video = operation.result.generated_videos[0].video
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Image to Video)
+
+```python
+from google.genai import types
+
+# Read local image (uses mimetypes.guess_type to infer mime type)
+image = types.Image.from_file("local/path/file.png")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if image is provided
+    prompt='Night sky',
+    image=image,
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+        # Can also pass an Image into last_frame for frame interpolation
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Video to Video)
+
+Currently, only Vertex supports Video to Video generation (Video extension).
+
+```python
+from google.genai import types
+
+# Read local video (uses mimetypes.guess_type to infer mime type)
+video = types.Video.from_file("local/path/video.mp4")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if Video is provided
+    prompt='Night sky',
+    # Input video must be in GCS
+    video=types.Video(
+        uri="gs://bucket-name/inputs/videos/cat_driving.mp4",
+    ),
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
 video.show()
 ```
 
@@ -74,8 +74,6 @@ try:
 except ImportError:
   pass
 
-# internal comment
-
 
 if TYPE_CHECKING:
   from multidict import CIMultiDictProxy
@@ -338,9 +336,11 @@ class HttpResponse:
 
 # Default retry options.
 # The config is based on https://cloud.google.com/storage/docs/retry-strategy.
-_RETRY_ATTEMPTS = 3
+# By default, the client will retry 4 times with approximately 1.0, 2.0, 4.0,
+# 8.0 seconds between each attempt.
+_RETRY_ATTEMPTS = 5  # including the initial call.
 _RETRY_INITIAL_DELAY = 1.0  # seconds
-_RETRY_MAX_DELAY = 120.0  # seconds
+_RETRY_MAX_DELAY = 60.0  # seconds
 _RETRY_EXP_BASE = 2
 _RETRY_JITTER = 1
 _RETRY_HTTP_STATUS_CODES = (
@@ -364,14 +364,13 @@ def _retry_args(options: Optional[HttpRetryOptions]) -> dict[str, Any]:
     The arguments passed to the tenacity.(Async)Retrying constructor.
   """
   if options is None:
-    return {'stop': tenacity.stop_after_attempt(1)}
+    return {'stop': tenacity.stop_after_attempt(1), 'reraise': True}
 
   stop = tenacity.stop_after_attempt(options.attempts or _RETRY_ATTEMPTS)
   retriable_codes = options.http_status_codes or _RETRY_HTTP_STATUS_CODES
-  retry = tenacity.retry_if_result(
-      lambda response: response.status_code in retriable_codes,
+  retry = tenacity.retry_if_exception(
+      lambda e: isinstance(e, errors.APIError) and e.code in retriable_codes,
   )
-  retry_error_callback = lambda retry_state: retry_state.outcome.result()
   wait = tenacity.wait_exponential_jitter(
       initial=options.initial_delay or _RETRY_INITIAL_DELAY,
       max=options.max_delay or _RETRY_MAX_DELAY,
@@ -381,7 +380,7 @@ def _retry_args(options: Optional[HttpRetryOptions]) -> dict[str, Any]:
   return {
       'stop': stop,
       'retry': retry,
-      'retry_error_callback': retry_error_callback,
+      'reraise': True,
       'wait': wait,
   }
 
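With these hunks, a retriable HTTP status now surfaces as an `errors.APIError` that tenacity retries on (and re-raises once attempts are exhausted), instead of being retried on the response object. Retries remain opt-in; a minimal sketch of enabling them from user code, assuming the `HttpRetryOptions` fields read above (`attempts`, `initial_delay`, `max_delay`, `http_status_codes`) are passed through `types.HttpOptions.retry_options`:

```python
from google import genai
from google.genai import types

# Sketch only: enables the retry path exercised by _retry_args above.
# The values mirror the new defaults; the status-code tuple is illustrative.
client = genai.Client(
    http_options=types.HttpOptions(
        retry_options=types.HttpRetryOptions(
            attempts=5,          # total tries, including the initial call
            initial_delay=1.0,   # seconds before the first retry
            max_delay=60.0,      # cap on the exponential backoff
            http_status_codes=(408, 429, 500, 502, 503, 504),
        )
    )
)
```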
 
@@ -569,18 +568,16 @@ class BaseApiClient:
     )
     self._httpx_client = SyncHttpxClient(**client_args)
     self._async_httpx_client = AsyncHttpxClient(**async_client_args)
-    if has_aiohttp:
+    if self._use_aiohttp():
       # Do it once at the genai.Client level. Share among all requests.
       self._async_client_session_request_args = self._ensure_aiohttp_ssl_ctx(
           self._http_options
-      )
-      self._websocket_ssl_ctx = self._ensure_websocket_ssl_ctx(
-          self._http_options
-      )
+      )
+    self._websocket_ssl_ctx = self._ensure_websocket_ssl_ctx(self._http_options)
 
     retry_kwargs = _retry_args(self._http_options.retry_options)
-    self._retry = tenacity.Retrying(**retry_kwargs, reraise=True)
-    self._async_retry = tenacity.AsyncRetrying(**retry_kwargs, reraise=True)
+    self._retry = tenacity.Retrying(**retry_kwargs)
+    self._async_retry = tenacity.AsyncRetrying(**retry_kwargs)
 
   @staticmethod
   def _ensure_httpx_ssl_ctx(
@@ -706,7 +703,6 @@ class BaseApiClient:
 
     return _maybe_set(async_args, ctx)
 
-
   @staticmethod
   def _ensure_websocket_ssl_ctx(options: HttpOptions) -> dict[str, Any]:
     """Ensures the SSL context is present in the async client args.
@@ -762,6 +758,14 @@ class BaseApiClient:
 
     return _maybe_set(async_args, ctx)
 
+  def _use_aiohttp(self) -> bool:
+    # If the instantiator has passed a custom transport, they want httpx not
+    # aiohttp.
+    return (
+        has_aiohttp
+        and (self._http_options.async_client_args or {}).get('transport')
+        is None
+    )
 
   def _websocket_base_url(self) -> str:
     url_parts = urlparse(self._http_options.base_url)
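The new `_use_aiohttp()` helper means the async code paths fall back to httpx whenever the caller supplies their own transport in `async_client_args`, even when aiohttp is installed. A hedged sketch of what that opt-out looks like from user code (the specific transport settings are illustrative):

```python
import httpx
from google import genai
from google.genai import types

# Supplying a 'transport' in async_client_args makes _use_aiohttp() return
# False, so async requests go through httpx instead of aiohttp.
client = genai.Client(
    http_options=types.HttpOptions(
        async_client_args={'transport': httpx.AsyncHTTPTransport(retries=1)},
    )
)
```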
@@ -975,7 +979,7 @@ class BaseApiClient:
       data = http_request.data
 
     if stream:
-      if has_aiohttp:
+      if self._use_aiohttp():
         session = aiohttp.ClientSession(
             headers=http_request.headers,
             trust_env=True,
@@ -1007,7 +1011,7 @@ class BaseApiClient:
         await errors.APIError.raise_for_async_response(client_response)
         return HttpResponse(client_response.headers, client_response)
     else:
-      if has_aiohttp:
+      if self._use_aiohttp():
         async with aiohttp.ClientSession(
             headers=http_request.headers,
             trust_env=True,
@@ -1061,11 +1065,10 @@ class BaseApiClient:
         http_method, path, request_dict, http_options
     )
     response = self._request(http_request, stream=False)
-    response_body = response.response_stream[0] if response.response_stream else ''
-    return SdkHttpResponse(
-        headers=response.headers, body=response_body
+    response_body = (
+        response.response_stream[0] if response.response_stream else ''
     )
-
+    return SdkHttpResponse(headers=response.headers, body=response_body)
 
   def request_streamed(
       self,
@@ -1080,7 +1083,9 @@ class BaseApiClient:
 
     session_response = self._request(http_request, stream=True)
     for chunk in session_response.segments():
-      yield SdkHttpResponse(headers=session_response.headers, body=json.dumps(chunk))
+      yield SdkHttpResponse(
+          headers=session_response.headers, body=json.dumps(chunk)
+      )
 
   async def async_request(
       self,
@@ -1095,10 +1100,7 @@ class BaseApiClient:
 
     result = await self._async_request(http_request=http_request, stream=False)
     response_body = result.response_stream[0] if result.response_stream else ''
-    return SdkHttpResponse(
-        headers=result.headers, body=response_body
-    )
-
+    return SdkHttpResponse(headers=result.headers, body=response_body)
 
   async def async_request_streamed(
       self,
@@ -1324,7 +1326,7 @@ class BaseApiClient:
     """
     offset = 0
     # Upload the file in chunks
-    if has_aiohttp:  # pylint: disable=g-import-not-at-top
+    if self._use_aiohttp():  # pylint: disable=g-import-not-at-top
       async with aiohttp.ClientSession(
           headers=self._http_options.headers,
          trust_env=True,
@@ -1507,7 +1509,7 @@ class BaseApiClient:
     else:
       data = http_request.data
 
-    if has_aiohttp:
+    if self._use_aiohttp():
       async with aiohttp.ClientSession(
           headers=http_request.headers,
          trust_env=True,
@@ -877,6 +877,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -942,6 +945,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
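These converter hunks (repeated across the generated modules) wire a new `computer_use` field on `types.Tool` through to the API's `computerUse` field. A hedged sketch of how a caller might enable it, assuming a `types.ToolComputerUse` config object exists for this field; the model name and environment string below are illustrative, not confirmed by this diff:

```python
from google import genai
from google.genai import types

client = genai.Client()

# Assumption: the new Tool.computer_use field is populated with a
# types.ToolComputerUse value; names here are illustrative.
response = client.models.generate_content(
    model='gemini-2.5-flash',
    contents='Open the docs page and summarize the first paragraph.',
    config=types.GenerateContentConfig(
        tools=[
            types.Tool(
                computer_use=types.ToolComputerUse(
                    environment='ENVIRONMENT_BROWSER'
                )
            )
        ],
    ),
)
print(response.text)
```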
 
@@ -1649,16 +1655,16 @@ def _LiveSendRealtimeInputParameters_to_vertex(
     setv(to_object, ['mediaChunks'], t.t_blobs(getv(from_object, ['media'])))
 
   if getv(from_object, ['audio']) is not None:
-    raise ValueError('audio parameter is not supported in Vertex AI.')
+    setv(to_object, ['audio'], t.t_audio_blob(getv(from_object, ['audio'])))
 
   if getv(from_object, ['audio_stream_end']) is not None:
     setv(to_object, ['audioStreamEnd'], getv(from_object, ['audio_stream_end']))
 
   if getv(from_object, ['video']) is not None:
-    raise ValueError('video parameter is not supported in Vertex AI.')
+    setv(to_object, ['video'], t.t_image_blob(getv(from_object, ['video'])))
 
   if getv(from_object, ['text']) is not None:
-    raise ValueError('text parameter is not supported in Vertex AI.')
+    setv(to_object, ['text'], getv(from_object, ['text']))
 
   if getv(from_object, ['activity_start']) is not None:
     setv(
@@ -1935,7 +1941,7 @@ def _LiveClientRealtimeInput_to_vertex(
     setv(to_object, ['mediaChunks'], getv(from_object, ['media_chunks']))
 
   if getv(from_object, ['audio']) is not None:
-    raise ValueError('audio parameter is not supported in Vertex AI.')
+    setv(to_object, ['audio'], getv(from_object, ['audio']))
 
   if getv(from_object, ['audio_stream_end']) is not None:
     raise ValueError(
@@ -1943,10 +1949,10 @@
     )
 
   if getv(from_object, ['video']) is not None:
-    raise ValueError('video parameter is not supported in Vertex AI.')
+    setv(to_object, ['video'], getv(from_object, ['video']))
 
   if getv(from_object, ['text']) is not None:
-    raise ValueError('text parameter is not supported in Vertex AI.')
+    setv(to_object, ['text'], getv(from_object, ['text']))
 
   if getv(from_object, ['activity_start']) is not None:
     setv(
@@ -2467,6 +2473,8 @@ def _LiveServerSetupComplete_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sessionId']) is not None:
+    setv(to_object, ['session_id'], getv(from_object, ['sessionId']))
 
   return to_object
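The Live converter changes above stop rejecting audio, video, and text realtime input on Vertex AI and forward them instead (audio and video are coerced through t.t_audio_blob / t.t_image_blob), and they surface the new sessionId from the setup-complete message. A hedged sketch of sending realtime text over a Vertex Live session; the project, location, and model name are placeholders:

```python
import asyncio
from google import genai
from google.genai import types

# Vertex AI client; project and location are placeholders.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

async def main() -> None:
    # Model name is illustrative; any Live-capable model follows the same flow.
    async with client.aio.live.connect(
        model='gemini-2.0-flash-live-preview-04-09',
        config=types.LiveConnectConfig(response_modalities=['TEXT']),
    ) as session:
        # Previously these converters raised ValueError for text/audio/video
        # realtime input on Vertex AI; they are now forwarded to the API.
        await session.send_realtime_input(text='Hello from a realtime client')
        async for message in session.receive():
            if message.text:
                print(message.text, end='')

asyncio.run(main())
```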
 
@@ -877,6 +877,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -942,6 +945,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -515,6 +515,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -1288,6 +1291,25 @@ def _ListBatchJobsParameters_to_mldev(
   return to_object
 
 
+def _DeleteBatchJobParameters_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['name']) is not None:
+    setv(
+        to_object,
+        ['_url', 'name'],
+        t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(to_object, ['config'], getv(from_object, ['config']))
+
+  return to_object
+
+
 def _VideoMetadata_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -1796,6 +1818,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -3036,6 +3061,9 @@ def _Tool_from_mldev(
   if getv(from_object, ['codeExecution']) is not None:
     setv(to_object, ['code_execution'], getv(from_object, ['codeExecution']))
 
+  if getv(from_object, ['computerUse']) is not None:
+    setv(to_object, ['computer_use'], getv(from_object, ['computerUse']))
+
   return to_object
 
 
@@ -3697,6 +3725,27 @@ def _ListBatchJobsResponse_from_mldev(
   return to_object
 
 
+def _DeleteResourceJob_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['done']) is not None:
+    setv(to_object, ['done'], getv(from_object, ['done']))
+
+  if getv(from_object, ['error']) is not None:
+    setv(
+        to_object,
+        ['error'],
+        _JobError_from_mldev(getv(from_object, ['error']), to_object),
+    )
+
+  return to_object
+
+
 def _JobError_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -4220,6 +4269,9 @@ def _Tool_from_vertex(
   if getv(from_object, ['codeExecution']) is not None:
     setv(to_object, ['code_execution'], getv(from_object, ['codeExecution']))
 
+  if getv(from_object, ['computerUse']) is not None:
+    setv(to_object, ['computer_use'], getv(from_object, ['computerUse']))
+
   return to_object
 
 
@@ -4937,7 +4989,7 @@ class Batches(_api_module.BaseModule):
       name (str): A fully-qualified BatchJob resource name or ID.
         Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
         when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Returns:
       A BatchJob object that contains details about the batch job.
@@ -5022,7 +5074,7 @@ class Batches(_api_module.BaseModule):
       name (str): A fully-qualified BatchJob resource name or ID.
         Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
        when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Usage:
 
@@ -5161,9 +5213,8 @@ class Batches(_api_module.BaseModule):
     )
 
     request_url_dict: Optional[dict[str, str]]
-    if not self._api_client.vertexai:
-      raise ValueError('This method is only supported in the Vertex AI client.')
-    else:
+
+    if self._api_client.vertexai:
       request_dict = _DeleteBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
@@ -5172,7 +5223,15 @@ class Batches(_api_module.BaseModule):
        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
      else:
        path = 'batchPredictionJobs/{name}'
-
+    else:
+      request_dict = _DeleteBatchJobParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batches/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batches/{name}'
     query_params = request_dict.get('_query')
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
@@ -5198,6 +5257,9 @@ class Batches(_api_module.BaseModule):
     if self._api_client.vertexai:
       response_dict = _DeleteResourceJob_from_vertex(response_dict)
 
+    else:
+      response_dict = _DeleteResourceJob_from_mldev(response_dict)
+
     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
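With the Batches.delete changes above, deleting a batch job is no longer Vertex-only: the Gemini Developer API path now builds a batches/{name} URL through the new _DeleteBatchJobParameters_to_mldev and _DeleteResourceJob_from_mldev converters. A short sketch of the user-facing call; the API key and batch name are placeholders:

```python
from google import genai

# Gemini Developer API client; before this release, delete() raised
# ValueError('This method is only supported in the Vertex AI client.').
client = genai.Client(api_key='YOUR_API_KEY')

# 'batches/abc' style names are now accepted, matching the updated docstrings.
delete_job = client.batches.delete(name='batches/abc')
print(delete_job.name, delete_job.done)
```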
@@ -5354,7 +5416,7 @@ class AsyncBatches(_api_module.BaseModule):
      name (str): A fully-qualified BatchJob resource name or ID.
        Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
        when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Returns:
       A BatchJob object that contains details about the batch job.
@@ -5441,7 +5503,7 @@ class AsyncBatches(_api_module.BaseModule):
      name (str): A fully-qualified BatchJob resource name or ID.
        Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
        when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Usage:
 
@@ -5582,9 +5644,8 @@ class AsyncBatches(_api_module.BaseModule):
     )
 
     request_url_dict: Optional[dict[str, str]]
-    if not self._api_client.vertexai:
-      raise ValueError('This method is only supported in the Vertex AI client.')
-    else:
+
+    if self._api_client.vertexai:
       request_dict = _DeleteBatchJobParameters_to_vertex(
           self._api_client, parameter_model
      )
@@ -5593,7 +5654,15 @@ class AsyncBatches(_api_module.BaseModule):
        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
      else:
        path = 'batchPredictionJobs/{name}'
-
+    else:
+      request_dict = _DeleteBatchJobParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batches/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batches/{name}'
     query_params = request_dict.get('_query')
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
@@ -5619,6 +5688,9 @@ class AsyncBatches(_api_module.BaseModule):
     if self._api_client.vertexai:
       response_dict = _DeleteResourceJob_from_vertex(response_dict)
 
+    else:
+      response_dict = _DeleteResourceJob_from_mldev(response_dict)
+
     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
@@ -395,6 +395,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -1057,6 +1060,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 