google-genai 1.22.0__py3-none-any.whl → 1.24.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to their respective public registries; it is provided for informational purposes only and reflects the changes between the two versions.
@@ -60,6 +60,11 @@ from .types import HttpOptionsOrDict
 from .types import HttpResponse as SdkHttpResponse
 from .types import HttpRetryOptions
 
+try:
+  from websockets.asyncio.client import connect as ws_connect
+except ModuleNotFoundError:
+  # This try/except is for TAP, mypy complains about it which is why we have the type: ignore
+  from websockets.client import connect as ws_connect  # type: ignore
 
 has_aiohttp = False
 try:
@@ -69,8 +74,6 @@ try:
 except ImportError:
   pass
 
-# internal comment
-
 
 if TYPE_CHECKING:
   from multidict import CIMultiDictProxy
@@ -227,11 +230,13 @@ class HttpResponse:
       headers: Union[dict[str, str], httpx.Headers, 'CIMultiDictProxy[str]'],
       response_stream: Union[Any, str] = None,
       byte_stream: Union[Any, bytes] = None,
+      session: Optional['aiohttp.ClientSession'] = None,
   ):
     self.status_code: int = 200
     self.headers = headers
     self.response_stream = response_stream
     self.byte_stream = byte_stream
+    self._session = session
 
   # Async iterator for async streaming.
   def __aiter__(self) -> 'HttpResponse':
@@ -291,16 +296,23 @@ class HttpResponse:
             chunk = chunk[len('data: ') :]
           yield json.loads(chunk)
       elif hasattr(self.response_stream, 'content'):
-        async for chunk in self.response_stream.content.iter_any():
-          # This is aiohttp.ClientResponse.
-          if chunk:
+        # This is aiohttp.ClientResponse.
+        try:
+          while True:
+            chunk = await self.response_stream.content.readline()
+            if not chunk:
+              break
             # In async streaming mode, the chunk of JSON is prefixed with
             # "data:" which we must strip before parsing.
-            if not isinstance(chunk, str):
-              chunk = chunk.decode('utf-8')
+            chunk = chunk.decode('utf-8')
            if chunk.startswith('data: '):
              chunk = chunk[len('data: ') :]
-            yield json.loads(chunk)
+            chunk = chunk.strip()
+            if chunk:
+              yield json.loads(chunk)
+        finally:
+          if hasattr(self, '_session') and self._session:
+            await self._session.close()
      else:
        raise ValueError('Error parsing streaming response.')
 
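The streaming hunk above switches the aiohttp branch from chunk-wise `iter_any()` to line-wise `readline()`, so each SSE `data:` payload is parsed as one complete JSON document, and the aiohttp session the response now owns is closed in a `finally` block once the stream is exhausted. A standalone sketch of the new parsing logic, with a plain list of byte lines standing in for aiohttp's stream reader:

```python
# Standalone sketch of the line-based SSE parsing introduced above; a list of
# byte lines stands in for aiohttp's StreamReader, and this is not SDK code.
import json
from typing import Any, Iterable, Iterator


def parse_sse_lines(lines: Iterable[bytes]) -> Iterator[Any]:
  for raw in lines:
    chunk = raw.decode('utf-8')
    if chunk.startswith('data: '):
      chunk = chunk[len('data: '):]
    chunk = chunk.strip()
    if chunk:  # skip keep-alive blank lines
      yield json.loads(chunk)


stream = [b'data: {"candidates": [1]}\n', b'\n', b'data: {"candidates": [2]}\n']
print(list(parse_sse_lines(stream)))  # [{'candidates': [1]}, {'candidates': [2]}]
```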
@@ -324,9 +336,11 @@ class HttpResponse:
 
 # Default retry options.
 # The config is based on https://cloud.google.com/storage/docs/retry-strategy.
-_RETRY_ATTEMPTS = 3
+# By default, the client will retry 4 times with approximately 1.0, 2.0, 4.0,
+# 8.0 seconds between each attempt.
+_RETRY_ATTEMPTS = 5  # including the initial call.
 _RETRY_INITIAL_DELAY = 1.0  # seconds
-_RETRY_MAX_DELAY = 120.0  # seconds
+_RETRY_MAX_DELAY = 60.0  # seconds
 _RETRY_EXP_BASE = 2
 _RETRY_JITTER = 1
 _RETRY_HTTP_STATUS_CODES = (
@@ -350,14 +364,13 @@ def _retry_args(options: Optional[HttpRetryOptions]) -> dict[str, Any]:
     The arguments passed to the tenacity.(Async)Retrying constructor.
   """
   if options is None:
-    return {'stop': tenacity.stop_after_attempt(1)}
+    return {'stop': tenacity.stop_after_attempt(1), 'reraise': True}
 
   stop = tenacity.stop_after_attempt(options.attempts or _RETRY_ATTEMPTS)
   retriable_codes = options.http_status_codes or _RETRY_HTTP_STATUS_CODES
-  retry = tenacity.retry_if_result(
-      lambda response: response.status_code in retriable_codes,
+  retry = tenacity.retry_if_exception(
+      lambda e: isinstance(e, errors.APIError) and e.code in retriable_codes,
   )
-  retry_error_callback = lambda retry_state: retry_state.outcome.result()
   wait = tenacity.wait_exponential_jitter(
       initial=options.initial_delay or _RETRY_INITIAL_DELAY,
       max=options.max_delay or _RETRY_MAX_DELAY,
@@ -367,7 +380,7 @@ def _retry_args(options: Optional[HttpRetryOptions]) -> dict[str, Any]:
   return {
       'stop': stop,
       'retry': retry,
-      'retry_error_callback': retry_error_callback,
+      'reraise': True,
       'wait': wait,
   }
 
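Taken together, the three retry hunks change `_retry_args` from result-based retries (inspecting `response.status_code`) to exception-based retries on `errors.APIError.code`, raise the default attempt count to 5 (the initial call plus 4 retries at roughly 1, 2, 4, and 8 seconds), cap the backoff at 60 seconds, and rely on `reraise=True` so the final error reaches the caller instead of a `tenacity.RetryError`. A minimal sketch of that tenacity wiring, using a hypothetical `FakeAPIError` in place of the SDK's `errors.APIError`:

```python
# Minimal sketch of the exception-based retry configuration; FakeAPIError is a
# stand-in for errors.APIError and the status codes are a representative set.
import tenacity


class FakeAPIError(Exception):

  def __init__(self, code: int):
    super().__init__(f'HTTP {code}')
    self.code = code


retriable_codes = (408, 429, 500, 502, 503, 504)
retrier = tenacity.Retrying(
    stop=tenacity.stop_after_attempt(5),  # includes the initial call
    retry=tenacity.retry_if_exception(
        lambda e: isinstance(e, FakeAPIError) and e.code in retriable_codes
    ),
    wait=tenacity.wait_exponential_jitter(initial=1.0, max=60.0, exp_base=2),
    reraise=True,  # surface the last FakeAPIError, not tenacity.RetryError
)

calls = {'n': 0}


def flaky_request() -> str:
  calls['n'] += 1
  if calls['n'] < 3:
    raise FakeAPIError(503)
  return 'ok'


print(retrier(flaky_request))  # fails twice with 503, then prints 'ok'
```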
@@ -538,6 +551,7 @@ class BaseApiClient:
     # Default options for both clients.
     self._http_options.headers = {'Content-Type': 'application/json'}
     if self.api_key:
+      self.api_key = self.api_key.strip()
       if self._http_options.headers is not None:
         self._http_options.headers['x-goog-api-key'] = self.api_key
     # Update the http options with the user provided http options.
@@ -554,15 +568,16 @@ class BaseApiClient:
     )
     self._httpx_client = SyncHttpxClient(**client_args)
     self._async_httpx_client = AsyncHttpxClient(**async_client_args)
-    if has_aiohttp:
+    if self._use_aiohttp():
       # Do it once at the genai.Client level. Share among all requests.
       self._async_client_session_request_args = self._ensure_aiohttp_ssl_ctx(
           self._http_options
       )
+    self._websocket_ssl_ctx = self._ensure_websocket_ssl_ctx(self._http_options)
 
     retry_kwargs = _retry_args(self._http_options.retry_options)
-    self._retry = tenacity.Retrying(**retry_kwargs, reraise=True)
-    self._async_retry = tenacity.AsyncRetrying(**retry_kwargs, reraise=True)
+    self._retry = tenacity.Retrying(**retry_kwargs)
+    self._async_retry = tenacity.AsyncRetrying(**retry_kwargs)
 
   @staticmethod
   def _ensure_httpx_ssl_ctx(
@@ -688,6 +703,70 @@ class BaseApiClient:
 
     return _maybe_set(async_args, ctx)
 
+  @staticmethod
+  def _ensure_websocket_ssl_ctx(options: HttpOptions) -> dict[str, Any]:
+    """Ensures the SSL context is present in the async client args.
+
+    Creates a default SSL context if one is not provided.
+
+    Args:
+      options: The http options to check for SSL context.
+
+    Returns:
+      An async aiohttp ClientSession._request args.
+    """
+
+    verify = 'ssl'  # keep it consistent with httpx.
+    async_args = options.async_client_args
+    ctx = async_args.get(verify) if async_args else None
+
+    if not ctx:
+      # Initialize the SSL context for the httpx client.
+      # Unlike requests, the aiohttp package does not automatically pull in the
+      # environment variables SSL_CERT_FILE or SSL_CERT_DIR. They need to be
+      # enabled explicitly. Instead of 'verify' at client level in httpx,
+      # aiohttp uses 'ssl' at request level.
+      ctx = ssl.create_default_context(
+          cafile=os.environ.get('SSL_CERT_FILE', certifi.where()),
+          capath=os.environ.get('SSL_CERT_DIR'),
+      )
+
+    def _maybe_set(
+        args: Optional[dict[str, Any]],
+        ctx: ssl.SSLContext,
+    ) -> dict[str, Any]:
+      """Sets the SSL context in the client args if not set.
+
+      Does not override the SSL context if it is already set.
+
+      Args:
+        args: The client args to check for SSL context.
+        ctx: The SSL context to set.
+
+      Returns:
+        The client args with the SSL context included.
+      """
+      if not args or not args.get(verify):
+        args = (args or {}).copy()
+        args[verify] = ctx
+      # Drop the args that aren't in the ws_connect signature.
+      copied_args = args.copy()
+      for key in copied_args.copy():
+        if key not in inspect.signature(ws_connect).parameters and key != 'ssl':
+          del copied_args[key]
+      return copied_args
+
+    return _maybe_set(async_args, ctx)
+
+  def _use_aiohttp(self) -> bool:
+    # If the instantiator has passed a custom transport, they want httpx not
+    # aiohttp.
+    return (
+        has_aiohttp
+        and (self._http_options.async_client_args or {}).get('transport')
+        is None
+    )
+
   def _websocket_base_url(self) -> str:
     url_parts = urlparse(self._http_options.base_url)
     return url_parts._replace(scheme='wss').geturl()  # type: ignore[arg-type, return-value]
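The new `_use_aiohttp()` helper means that installing aiohttp no longer unconditionally selects the aiohttp code path: passing a custom `transport` in `async_client_args` keeps all async traffic on httpx. A hedged usage sketch, assuming the `HttpOptions.async_client_args` field referenced throughout this diff:

```python
# Illustrative only: supplying a custom httpx transport, which the new
# _use_aiohttp() check treats as a request to bypass aiohttp entirely.
import httpx
from google import genai
from google.genai import types

transport = httpx.AsyncHTTPTransport(retries=0)  # any custom transport works here
client = genai.Client(
    api_key='YOUR_API_KEY',
    http_options=types.HttpOptions(async_client_args={'transport': transport}),
)
```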
@@ -882,6 +961,7 @@ class BaseApiClient:
       self, http_request: HttpRequest, stream: bool = False
   ) -> HttpResponse:
     data: Optional[Union[str, bytes]] = None
+
     if self.vertexai and not self.api_key:
       http_request.headers['Authorization'] = (
           f'Bearer {await self._async_access_token()}'
@@ -899,7 +979,7 @@ class BaseApiClient:
       data = http_request.data
 
     if stream:
-      if has_aiohttp:
+      if self._use_aiohttp():
        session = aiohttp.ClientSession(
            headers=http_request.headers,
            trust_env=True,
@@ -912,8 +992,9 @@ class BaseApiClient:
            timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
            **self._async_client_session_request_args,
        )
+
        await errors.APIError.raise_for_async_response(response)
-        return HttpResponse(response.headers, response)
+        return HttpResponse(response.headers, response, session=session)
      else:
        # aiohttp is not available. Fall back to httpx.
        httpx_request = self._async_httpx_client.build_request(
@@ -930,7 +1011,7 @@ class BaseApiClient:
        await errors.APIError.raise_for_async_response(client_response)
        return HttpResponse(client_response.headers, client_response)
     else:
-      if has_aiohttp:
+      if self._use_aiohttp():
        async with aiohttp.ClientSession(
            headers=http_request.headers,
            trust_env=True,
@@ -984,11 +1065,10 @@ class BaseApiClient:
         http_method, path, request_dict, http_options
     )
     response = self._request(http_request, stream=False)
-    response_body = response.response_stream[0] if response.response_stream else ''
-    return SdkHttpResponse(
-        headers=response.headers, body=response_body
+    response_body = (
+        response.response_stream[0] if response.response_stream else ''
     )
-
+    return SdkHttpResponse(headers=response.headers, body=response_body)
 
   def request_streamed(
       self,
@@ -1003,7 +1083,9 @@ class BaseApiClient:
 
     session_response = self._request(http_request, stream=True)
     for chunk in session_response.segments():
-      yield SdkHttpResponse(headers=session_response.headers, body=json.dumps(chunk))
+      yield SdkHttpResponse(
+          headers=session_response.headers, body=json.dumps(chunk)
+      )
 
   async def async_request(
       self,
@@ -1018,10 +1100,7 @@ class BaseApiClient:
 
     result = await self._async_request(http_request=http_request, stream=False)
     response_body = result.response_stream[0] if result.response_stream else ''
-    return SdkHttpResponse(
-        headers=result.headers, body=response_body
-    )
-
+    return SdkHttpResponse(headers=result.headers, body=response_body)
 
   async def async_request_streamed(
       self,
@@ -1247,7 +1326,7 @@ class BaseApiClient:
     """
     offset = 0
     # Upload the file in chunks
-    if has_aiohttp:  # pylint: disable=g-import-not-at-top
+    if self._use_aiohttp():  # pylint: disable=g-import-not-at-top
       async with aiohttp.ClientSession(
           headers=self._http_options.headers,
           trust_env=True,
@@ -1430,7 +1509,7 @@ class BaseApiClient:
     else:
       data = http_request.data
 
-    if has_aiohttp:
+    if self._use_aiohttp():
       async with aiohttp.ClientSession(
           headers=http_request.headers,
           trust_env=True,
@@ -877,6 +877,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -942,6 +945,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -1649,16 +1655,16 @@ def _LiveSendRealtimeInputParameters_to_vertex(
     setv(to_object, ['mediaChunks'], t.t_blobs(getv(from_object, ['media'])))
 
   if getv(from_object, ['audio']) is not None:
-    raise ValueError('audio parameter is not supported in Vertex AI.')
+    setv(to_object, ['audio'], t.t_audio_blob(getv(from_object, ['audio'])))
 
   if getv(from_object, ['audio_stream_end']) is not None:
     setv(to_object, ['audioStreamEnd'], getv(from_object, ['audio_stream_end']))
 
   if getv(from_object, ['video']) is not None:
-    raise ValueError('video parameter is not supported in Vertex AI.')
+    setv(to_object, ['video'], t.t_image_blob(getv(from_object, ['video'])))
 
   if getv(from_object, ['text']) is not None:
-    raise ValueError('text parameter is not supported in Vertex AI.')
+    setv(to_object, ['text'], getv(from_object, ['text']))
 
   if getv(from_object, ['activity_start']) is not None:
     setv(
@@ -1935,7 +1941,7 @@ def _LiveClientRealtimeInput_to_vertex(
     setv(to_object, ['mediaChunks'], getv(from_object, ['media_chunks']))
 
   if getv(from_object, ['audio']) is not None:
-    raise ValueError('audio parameter is not supported in Vertex AI.')
+    setv(to_object, ['audio'], getv(from_object, ['audio']))
 
   if getv(from_object, ['audio_stream_end']) is not None:
     raise ValueError(
@@ -1943,10 +1949,10 @@ def _LiveClientRealtimeInput_to_vertex(
     )
 
   if getv(from_object, ['video']) is not None:
-    raise ValueError('video parameter is not supported in Vertex AI.')
+    setv(to_object, ['video'], getv(from_object, ['video']))
 
   if getv(from_object, ['text']) is not None:
-    raise ValueError('text parameter is not supported in Vertex AI.')
+    setv(to_object, ['text'], getv(from_object, ['text']))
 
   if getv(from_object, ['activity_start']) is not None:
     setv(
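These converter hunks lift the Vertex AI restriction on `audio`, `video`, and `text` realtime input in the Live API; the parameters are now mapped through instead of raising `ValueError`. A hedged usage sketch (the model name, project, and audio bytes are placeholders):

```python
# Hedged sketch of sending realtime audio and text over a Vertex AI Live
# session, which these converters stop rejecting. Model name, project, and
# audio payload are placeholders.
import asyncio

from google import genai
from google.genai import types


async def main() -> None:
  client = genai.Client(vertexai=True, project='my-project', location='us-central1')
  async with client.aio.live.connect(model='gemini-2.0-flash-live-preview-04-09') as session:
    await session.send_realtime_input(
        audio=types.Blob(data=b'\x00\x00', mime_type='audio/pcm;rate=16000')
    )
    await session.send_realtime_input(text='What did you hear?')


asyncio.run(main())
```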
@@ -2467,6 +2473,8 @@ def _LiveServerSetupComplete_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sessionId']) is not None:
+    setv(to_object, ['session_id'], getv(from_object, ['sessionId']))
 
   return to_object
 
google/genai/batches.py CHANGED
@@ -515,6 +515,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -1288,6 +1291,25 @@ def _ListBatchJobsParameters_to_mldev(
   return to_object
 
 
+def _DeleteBatchJobParameters_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['name']) is not None:
+    setv(
+        to_object,
+        ['_url', 'name'],
+        t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(to_object, ['config'], getv(from_object, ['config']))
+
+  return to_object
+
+
 def _VideoMetadata_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -1796,6 +1818,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -3036,6 +3061,9 @@ def _Tool_from_mldev(
   if getv(from_object, ['codeExecution']) is not None:
     setv(to_object, ['code_execution'], getv(from_object, ['codeExecution']))
 
+  if getv(from_object, ['computerUse']) is not None:
+    setv(to_object, ['computer_use'], getv(from_object, ['computerUse']))
+
   return to_object
 
 
@@ -3697,6 +3725,27 @@ def _ListBatchJobsResponse_from_mldev(
   return to_object
 
 
+def _DeleteResourceJob_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['done']) is not None:
+    setv(to_object, ['done'], getv(from_object, ['done']))
+
+  if getv(from_object, ['error']) is not None:
+    setv(
+        to_object,
+        ['error'],
+        _JobError_from_mldev(getv(from_object, ['error']), to_object),
+    )
+
+  return to_object
+
+
 def _JobError_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -4220,6 +4269,9 @@ def _Tool_from_vertex(
   if getv(from_object, ['codeExecution']) is not None:
     setv(to_object, ['code_execution'], getv(from_object, ['codeExecution']))
 
+  if getv(from_object, ['computerUse']) is not None:
+    setv(to_object, ['computer_use'], getv(from_object, ['computerUse']))
+
   return to_object
 
 
@@ -4937,7 +4989,7 @@ class Batches(_api_module.BaseModule):
       name (str): A fully-qualified BatchJob resource name or ID.
         Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
         when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Returns:
       A BatchJob object that contains details about the batch job.
@@ -5022,7 +5074,7 @@ class Batches(_api_module.BaseModule):
       name (str): A fully-qualified BatchJob resource name or ID.
         Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
         when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Usage:
 
@@ -5161,9 +5213,8 @@ class Batches(_api_module.BaseModule):
     )
 
     request_url_dict: Optional[dict[str, str]]
-    if not self._api_client.vertexai:
-      raise ValueError('This method is only supported in the Vertex AI client.')
-    else:
+
+    if self._api_client.vertexai:
       request_dict = _DeleteBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
@@ -5172,7 +5223,15 @@ class Batches(_api_module.BaseModule):
        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
      else:
        path = 'batchPredictionJobs/{name}'
-
+    else:
+      request_dict = _DeleteBatchJobParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batches/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batches/{name}'
     query_params = request_dict.get('_query')
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
@@ -5198,6 +5257,9 @@ class Batches(_api_module.BaseModule):
     if self._api_client.vertexai:
       response_dict = _DeleteResourceJob_from_vertex(response_dict)
 
+    else:
+      response_dict = _DeleteResourceJob_from_mldev(response_dict)
+
     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
@@ -5354,7 +5416,7 @@ class AsyncBatches(_api_module.BaseModule):
       name (str): A fully-qualified BatchJob resource name or ID.
         Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
         when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Returns:
       A BatchJob object that contains details about the batch job.
@@ -5441,7 +5503,7 @@ class AsyncBatches(_api_module.BaseModule):
       name (str): A fully-qualified BatchJob resource name or ID.
         Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
         when project and location are initialized in the Vertex AI client. Or
-        "files/abc" using the Gemini Developer AI client.
+        "batches/abc" using the Gemini Developer AI client.
 
     Usage:
 
@@ -5582,9 +5644,8 @@ class AsyncBatches(_api_module.BaseModule):
     )
 
     request_url_dict: Optional[dict[str, str]]
-    if not self._api_client.vertexai:
-      raise ValueError('This method is only supported in the Vertex AI client.')
-    else:
+
+    if self._api_client.vertexai:
       request_dict = _DeleteBatchJobParameters_to_vertex(
           self._api_client, parameter_model
       )
@@ -5593,7 +5654,15 @@ class AsyncBatches(_api_module.BaseModule):
        path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
      else:
        path = 'batchPredictionJobs/{name}'
-
+    else:
+      request_dict = _DeleteBatchJobParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = 'batches/{name}'.format_map(request_url_dict)
+      else:
+        path = 'batches/{name}'
     query_params = request_dict.get('_query')
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
@@ -5619,6 +5688,9 @@ class AsyncBatches(_api_module.BaseModule):
     if self._api_client.vertexai:
       response_dict = _DeleteResourceJob_from_vertex(response_dict)
 
+    else:
+      response_dict = _DeleteResourceJob_from_mldev(response_dict)
+
     return_value = types.DeleteResourceJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
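Taken together, the batches.py hunks add a Gemini Developer API (mldev) path for batch-job deletion, which previously raised `ValueError` outside Vertex AI, and fix the docstrings to use the `batches/...` name form. A hedged usage sketch (the job name is a placeholder):

```python
# Illustrative sketch: deleting a batch job with the Gemini Developer API
# client, which these hunks enable (previously Vertex AI only). The job name
# 'batches/abc123' is a placeholder.
from google import genai

client = genai.Client(api_key='YOUR_API_KEY')
delete_job = client.batches.delete(name='batches/abc123')
print(delete_job.done, delete_job.error)
```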
google/genai/caches.py CHANGED
@@ -395,6 +395,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -1057,6 +1060,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
google/genai/live.py CHANGED
@@ -1037,7 +1037,10 @@ class AsyncLive(_api_module.BaseModule):
       if headers is None:
         headers = {}
       _mcp_utils.set_mcp_usage_header(headers)
-      async with ws_connect(uri, additional_headers=headers) as ws:
+
+      async with ws_connect(
+          uri, additional_headers=headers, **self._api_client._websocket_ssl_ctx
+      ) as ws:
         await ws.send(request)
         try:
           # websockets 14.0+
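The live.py change threads the SSL context built by `_ensure_websocket_ssl_ctx` into `ws_connect`, so the Live API websocket honours the same `SSL_CERT_FILE`/`SSL_CERT_DIR` overrides as the aiohttp path. A minimal sketch of that underlying pattern outside the SDK:

```python
# Sketch of the underlying pattern, not SDK code: build a default SSL context
# from SSL_CERT_FILE/SSL_CERT_DIR (falling back to certifi's bundle) and pass
# it to websockets' connect() via its `ssl` keyword.
import os
import ssl

import certifi
from websockets.asyncio.client import connect as ws_connect

ctx = ssl.create_default_context(
    cafile=os.environ.get('SSL_CERT_FILE', certifi.where()),
    capath=os.environ.get('SSL_CERT_DIR'),
)


async def open_socket(uri: str):
  async with ws_connect(uri, ssl=ctx) as ws:
    await ws.send('hello')
    return await ws.recv()
```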
google/genai/models.py CHANGED
@@ -517,6 +517,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -1958,6 +1961,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
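Every `_Tool_to_*`/`_Tool_from_*` hunk in this release wires a new `computer_use` field on `Tool` through to the wire-format key `computerUse`. A hedged sketch of passing it to `generate_content`; the nested type and enum names (`ToolComputerUse`, `Environment.ENVIRONMENT_BROWSER`) are assumptions inferred from the converter keys, so check the 1.24.0 `types` module for the authoritative spelling:

```python
# Hedged sketch: requesting the new computer_use tool. The nested type/enum
# names below are assumptions; only the top-level computer_use field is
# confirmed by the converters in this diff.
from google import genai
from google.genai import types

client = genai.Client(api_key='YOUR_API_KEY')
response = client.models.generate_content(
    model='gemini-2.5-flash',
    contents='Open the docs page and summarize it.',
    config=types.GenerateContentConfig(
        tools=[
            types.Tool(
                computer_use=types.ToolComputerUse(
                    environment=types.Environment.ENVIRONMENT_BROWSER
                )
            )
        ],
    ),
)
print(response.text)
```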