google-genai 0.3.0__tar.gz → 0.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {google_genai-0.3.0/google_genai.egg-info → google_genai-0.5.0}/PKG-INFO +57 -17
  2. {google_genai-0.3.0 → google_genai-0.5.0}/README.md +55 -15
  3. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/__init__.py +2 -1
  4. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/_api_client.py +161 -52
  5. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/_automatic_function_calling_util.py +14 -14
  6. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/_common.py +14 -29
  7. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/_replay_api_client.py +13 -54
  8. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/_transformers.py +38 -0
  9. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/batches.py +80 -78
  10. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/caches.py +112 -98
  11. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/chats.py +7 -10
  12. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/client.py +6 -3
  13. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/files.py +91 -90
  14. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/live.py +65 -34
  15. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/models.py +374 -297
  16. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/tunings.py +87 -85
  17. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/types.py +167 -82
  18. google_genai-0.5.0/google/genai/version.py +16 -0
  19. {google_genai-0.3.0 → google_genai-0.5.0/google_genai.egg-info}/PKG-INFO +57 -17
  20. {google_genai-0.3.0 → google_genai-0.5.0}/google_genai.egg-info/SOURCES.txt +1 -0
  21. {google_genai-0.3.0 → google_genai-0.5.0}/pyproject.toml +1 -1
  22. {google_genai-0.3.0 → google_genai-0.5.0}/LICENSE +0 -0
  23. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/_extra_utils.py +0 -0
  24. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/_test_api_client.py +0 -0
  25. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/errors.py +0 -0
  26. {google_genai-0.3.0 → google_genai-0.5.0}/google/genai/pagers.py +0 -0
  27. {google_genai-0.3.0 → google_genai-0.5.0}/google_genai.egg-info/dependency_links.txt +0 -0
  28. {google_genai-0.3.0 → google_genai-0.5.0}/google_genai.egg-info/requires.txt +0 -0
  29. {google_genai-0.3.0 → google_genai-0.5.0}/google_genai.egg-info/top_level.txt +0 -0
  30. {google_genai-0.3.0 → google_genai-0.5.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: google-genai
- Version: 0.3.0
+ Version: 0.5.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License: Apache-2.0
@@ -436,10 +436,10 @@ response1 = client.models.generate_image(
      model='imagen-3.0-generate-001',
      prompt='An umbrella in the foreground, and a rainy night sky in the background',
      config=types.GenerateImageConfig(
-         negative_prompt= "human",
+         negative_prompt= 'human',
          number_of_images= 1,
          include_rai_reason= True,
-         output_mime_type= "image/jpeg"
+         output_mime_type= 'image/jpeg'
      )
  )
  response1.generated_images[0].image.show()
@@ -454,7 +454,11 @@ Upscale image is not supported in Google AI.
  response2 = client.models.upscale_image(
      model='imagen-3.0-generate-001',
      image=response1.generated_images[0].image,
-     config=types.UpscaleImageConfig(upscale_factor="x2")
+     upscale_factor='x2',
+     config=types.UpscaleImageConfig(
+         include_rai_reason= True,
+         output_mime_type= 'image/jpeg',
+     ),
  )
  response2.generated_images[0].image.show()
  ```
@@ -497,6 +501,42 @@ response3 = client.models.edit_image(
  response3.generated_images[0].image.show()
  ```
  
+ ## Chats
+ 
+ Create a chat session to start multi-turn conversations with the model.
+ 
+ ### Send Message
+ 
+ ```python
+ chat = client.chats.create(model='gemini-2.0-flash-exp')
+ response = chat.send_message('tell me a story')
+ print(response.text)
+ ```
+ 
+ ### Streaming
+ 
+ ```python
+ chat = client.chats.create(model='gemini-2.0-flash-exp')
+ for chunk in chat.send_message_stream('tell me a story'):
+     print(chunk.text)
+ ```
+ 
+ ### Async
+ 
+ ```python
+ chat = client.aio.chats.create(model='gemini-2.0-flash-exp')
+ response = await chat.send_message('tell me a story')
+ print(response.text)
+ ```
+ 
+ ### Async Streaming
+ 
+ ```python
+ chat = client.aio.chats.create(model='gemini-2.0-flash-exp')
+ async for chunk in chat.send_message_stream('tell me a story'):
+     print(chunk.text)
+ ```
+ 
  ## Files (Only Google AI)
  
  ``` python
@@ -539,19 +579,19 @@ else:
  
  cached_content = client.caches.create(
      model='gemini-1.5-pro-002',
-     contents=[
-         types.Content(
-             role='user',
-             parts=[
-                 types.Part.from_uri(
-                     file_uri=file_uris[0],
-                     mime_type='application/pdf'),
-                 types.Part.from_uri(
-                     file_uri=file_uris[1],
-                     mime_type='application/pdf',)])
-     ],
-     system_instruction='What is the sum of the two pdfs?',
      config=types.CreateCachedContentConfig(
+         contents=[
+             types.Content(
+                 role='user',
+                 parts=[
+                     types.Part.from_uri(
+                         file_uri=file_uris[0],
+                         mime_type='application/pdf'),
+                     types.Part.from_uri(
+                         file_uri=file_uris[1],
+                         mime_type='application/pdf',)])
+         ],
+         system_instruction='What is the sum of the two pdfs?',
          display_name='test cache',
          ttl='3600s',
      ),
@@ -408,10 +408,10 @@ response1 = client.models.generate_image(
      model='imagen-3.0-generate-001',
      prompt='An umbrella in the foreground, and a rainy night sky in the background',
      config=types.GenerateImageConfig(
-         negative_prompt= "human",
+         negative_prompt= 'human',
          number_of_images= 1,
          include_rai_reason= True,
-         output_mime_type= "image/jpeg"
+         output_mime_type= 'image/jpeg'
      )
  )
  response1.generated_images[0].image.show()
@@ -426,7 +426,11 @@ Upscale image is not supported in Google AI.
  response2 = client.models.upscale_image(
      model='imagen-3.0-generate-001',
      image=response1.generated_images[0].image,
-     config=types.UpscaleImageConfig(upscale_factor="x2")
+     upscale_factor='x2',
+     config=types.UpscaleImageConfig(
+         include_rai_reason= True,
+         output_mime_type= 'image/jpeg',
+     ),
  )
  response2.generated_images[0].image.show()
  ```
@@ -469,6 +473,42 @@ response3 = client.models.edit_image(
  response3.generated_images[0].image.show()
  ```
  
+ ## Chats
+ 
+ Create a chat session to start multi-turn conversations with the model.
+ 
+ ### Send Message
+ 
+ ```python
+ chat = client.chats.create(model='gemini-2.0-flash-exp')
+ response = chat.send_message('tell me a story')
+ print(response.text)
+ ```
+ 
+ ### Streaming
+ 
+ ```python
+ chat = client.chats.create(model='gemini-2.0-flash-exp')
+ for chunk in chat.send_message_stream('tell me a story'):
+     print(chunk.text)
+ ```
+ 
+ ### Async
+ 
+ ```python
+ chat = client.aio.chats.create(model='gemini-2.0-flash-exp')
+ response = await chat.send_message('tell me a story')
+ print(response.text)
+ ```
+ 
+ ### Async Streaming
+ 
+ ```python
+ chat = client.aio.chats.create(model='gemini-2.0-flash-exp')
+ async for chunk in chat.send_message_stream('tell me a story'):
+     print(chunk.text)
+ ```
+ 
  ## Files (Only Google AI)
  
  ``` python
@@ -511,19 +551,19 @@ else:
  
  cached_content = client.caches.create(
      model='gemini-1.5-pro-002',
-     contents=[
-         types.Content(
-             role='user',
-             parts=[
-                 types.Part.from_uri(
-                     file_uri=file_uris[0],
-                     mime_type='application/pdf'),
-                 types.Part.from_uri(
-                     file_uri=file_uris[1],
-                     mime_type='application/pdf',)])
-     ],
-     system_instruction='What is the sum of the two pdfs?',
      config=types.CreateCachedContentConfig(
+         contents=[
+             types.Content(
+                 role='user',
+                 parts=[
+                     types.Part.from_uri(
+                         file_uri=file_uris[0],
+                         mime_type='application/pdf'),
+                     types.Part.from_uri(
+                         file_uri=file_uris[1],
+                         mime_type='application/pdf',)])
+         ],
+         system_instruction='What is the sum of the two pdfs?',
          display_name='test cache',
          ttl='3600s',
      ),
@@ -16,7 +16,8 @@
  """Google Gen AI SDK"""
  
  from .client import Client
+ from . import version
  
- __version__ = '0.3.0'
+ __version__ = version.__version__
  
  __all__ = ['Client']
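
The new file google/genai/version.py (listed above at +16 lines) is not expanded in this diff. Assuming it follows the usual pattern of a license header plus a single pinned string, the module that __init__.py now imports would reduce to roughly this sketch:

```python
# Hypothetical sketch of google/genai/version.py; the file itself is not shown
# in this diff, and most of its 16 added lines are presumably the Apache-2.0
# license header.
__version__ = '0.5.0'  # matches the released package version
```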
@@ -21,37 +21,74 @@ import copy
  from dataclasses import dataclass
  import datetime
  import json
+ import logging
  import os
  import sys
- from typing import Any, Optional, TypedDict, Union
+ from typing import Any, Optional, Tuple, TypedDict, Union
  from urllib.parse import urlparse, urlunparse
  
  import google.auth
  import google.auth.credentials
  from google.auth.transport.requests import AuthorizedSession
- from pydantic import BaseModel
+ from pydantic import BaseModel, ConfigDict, Field, ValidationError
  import requests
  
  from . import errors
+ from . import version
  
  
- class HttpOptions(TypedDict):
+ class HttpOptions(BaseModel):
+   """HTTP options for the api client."""
+   model_config = ConfigDict(extra='forbid')
+ 
+   base_url: Optional[str] = Field(
+       default=None,
+       description="""The base URL for the AI platform service endpoint.""",
+   )
+   api_version: Optional[str] = Field(
+       default=None,
+       description="""Specifies the version of the API to use.""",
+   )
+   headers: Optional[dict[str, str]] = Field(
+       default=None,
+       description="""Additional HTTP headers to be sent with the request.""",
+   )
+   response_payload: Optional[dict] = Field(
+       default=None,
+       description="""If set, the response payload will be returned in the supplied dict.""",
+   )
+   timeout: Optional[Union[float, Tuple[float, float]]] = Field(
+       default=None,
+       description="""Timeout for the request in seconds.""",
+   )
+   skip_project_and_location_in_path: bool = Field(
+       default=False,
+       description="""If set to True, the project and location will not be appended to the path.""",
+   )
+ 
+ 
+ class HttpOptionsDict(TypedDict):
    """HTTP options for the api client."""
  
-   base_url: str = None
+   base_url: Optional[str] = None
    """The base URL for the AI platform service endpoint."""
-   api_version: str = None
+   api_version: Optional[str] = None
    """Specifies the version of the API to use."""
-   headers: dict[str, dict] = None
+   headers: Optional[dict[str, str]] = None
    """Additional HTTP headers to be sent with the request."""
-   response_payload: dict = None
+   response_payload: Optional[dict] = None
    """If set, the response payload will be returned in the supplied dict."""
+   timeout: Optional[Union[float, Tuple[float, float]]] = None
+   """Timeout for the request in seconds."""
+   skip_project_and_location_in_path: bool = False
+   """If set to True, the project and location will not be appended to the path."""
+ 
+ HttpOptionsOrDict = Union[HttpOptions, HttpOptionsDict]
  
  
  def _append_library_version_headers(headers: dict[str, str]) -> None:
    """Appends the telemetry header to the headers dict."""
-   # TODO: Automate revisions to the SDK library version.
-   library_label = f'google-genai-sdk/0.3.0'
+   library_label = f'google-genai-sdk/{version.__version__}'
    language_label = 'gl-python/' + sys.version.split()[0]
    version_header_value = f'{library_label} {language_label}'
    if (
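
HttpOptions is now a Pydantic model with extra='forbid', and HttpOptionsOrDict is accepted wherever a plain dict was accepted before, so misspelled option keys fail validation instead of being silently ignored. A minimal sketch, assuming the Client constructor still forwards http_options to ApiClient as it did in 0.3.0:

```python
from google import genai
from google.genai._api_client import HttpOptions  # private module shown in this diff

# Plain dict form: validated against HttpOptions inside ApiClient.__init__.
client = genai.Client(
    api_key='YOUR_API_KEY',  # placeholder
    http_options={'api_version': 'v1beta', 'headers': {'x-custom-header': 'demo'}},
)

# Pydantic form: unknown fields raise a ValidationError up front
# because of ConfigDict(extra='forbid').
client = genai.Client(
    api_key='YOUR_API_KEY',  # placeholder
    http_options=HttpOptions(api_version='v1beta'),
)
```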
@@ -71,20 +108,24 @@ def _append_library_version_headers(headers: dict[str, str]) -> None:
  
  
  def _patch_http_options(
-     options: HttpOptions, patch_options: HttpOptions
- ) -> HttpOptions:
+     options: HttpOptionsDict, patch_options: HttpOptionsDict
+ ) -> HttpOptionsDict:
    # use shallow copy so we don't override the original objects.
-   copy_option = HttpOptions()
+   copy_option = HttpOptionsDict()
    copy_option.update(options)
-   for k, v in patch_options.items():
+   for patch_key, patch_value in patch_options.items():
      # if both are dicts, update the copy.
      # This is to handle cases like merging headers.
-     if isinstance(v, dict) and isinstance(copy_option.get(k, None), dict):
-       copy_option[k] = {}
-       copy_option[k].update(options[k]) # shallow copy from original options.
-       copy_option[k].update(v)
-     elif v is not None: # Accept empty values.
-       copy_option[k] = v
+     if isinstance(patch_value, dict) and isinstance(
+         copy_option.get(patch_key, None), dict
+     ):
+       copy_option[patch_key] = {}
+       copy_option[patch_key].update(
+           options[patch_key]
+       ) # shallow copy from original options.
+       copy_option[patch_key].update(patch_value)
+     elif patch_value is not None: # Accept empty values.
+       copy_option[patch_key] = patch_value
    _append_library_version_headers(copy_option['headers'])
    return copy_option
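
The renamed loop variables make the merge rule easier to follow: nested dicts (in practice the headers entry) are shallow-merged, while any other non-None patch value simply overwrites the default. For illustration only, outside the SDK:

```python
# Standalone illustration of the merge semantics in _patch_http_options above;
# this does not call into the SDK and omits the telemetry headers it appends.
defaults = {
    'base_url': 'https://generativelanguage.googleapis.com/',
    'headers': {'Content-Type': 'application/json'},
}
per_request = {'headers': {'x-goog-user-project': 'my-project'}, 'timeout': 30}

merged = dict(defaults)
for key, value in per_request.items():
    if isinstance(value, dict) and isinstance(merged.get(key), dict):
        merged[key] = {**defaults[key], **value}  # shallow-merge nested dicts
    elif value is not None:
        merged[key] = value  # scalars override
# merged['headers'] keeps Content-Type and gains x-goog-user-project;
# merged['timeout'] is 30.
```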
 
@@ -102,6 +143,7 @@ class HttpRequest:
    url: str
    method: str
    data: Union[dict[str, object], bytes]
+   timeout: Optional[Union[float, Tuple[float, float]]] = None
  
  
  class HttpResponse:
@@ -147,7 +189,7 @@ class ApiClient:
      credentials: google.auth.credentials.Credentials = None,
      project: Union[str, None] = None,
      location: Union[str, None] = None,
-     http_options: HttpOptions = None,
+     http_options: HttpOptionsOrDict = None,
  ):
    self.vertexai = vertexai
    if self.vertexai is None:
@@ -159,30 +201,84 @@ class ApiClient:
  
    # Validate explicitly set initializer values.
    if (project or location) and api_key:
+     # API cannot consume both project/location and api_key.
      raise ValueError(
          'Project/location and API key are mutually exclusive in the client initializer.'
      )
+   elif credentials and api_key:
+     # API cannot consume both credentials and api_key.
+     raise ValueError(
+         'Credentials and API key are mutually exclusive in the client initializer.'
+     )
+ 
+   # Validate http_options if a dict is provided.
+   if isinstance(http_options, dict):
+     try:
+       HttpOptions.model_validate(http_options)
+     except ValidationError as e:
+       raise ValueError(f'Invalid http_options: {e}')
+   elif(isinstance(http_options, HttpOptions)):
+     http_options = http_options.model_dump()
+ 
+   # Retrieve implicitly set values from the environment.
+   env_project = os.environ.get('GOOGLE_CLOUD_PROJECT', None)
+   env_location = os.environ.get('GOOGLE_CLOUD_LOCATION', None)
+   env_api_key = os.environ.get('GOOGLE_API_KEY', None)
+   self.project = project or env_project
+   self.location = location or env_location
+   self.api_key = api_key or env_api_key
  
-   self.api_key: Optional[str] = None
-   self.project = project or os.environ.get('GOOGLE_CLOUD_PROJECT', None)
-   self.location = location or os.environ.get('GOOGLE_CLOUD_LOCATION', None)
    self._credentials = credentials
-   self._http_options = HttpOptions()
+   self._http_options = HttpOptionsDict()
  
+   # Handle when to use Vertex AI in express mode (api key).
+   # Explicit initializer arguments are already validated above.
    if self.vertexai:
-     if not self.project:
+     if credentials:
+       # Explicit credentials take precedence over implicit api_key.
+       logging.info(
+           'The user provided Google Cloud credentials will take precedence'
+           + ' over the API key from the environment variable.'
+       )
+       self.api_key = None
+     elif (env_location or env_project) and api_key:
+       # Explicit api_key takes precedence over implicit project/location.
+       logging.info(
+           'The user provided Vertex AI API key will take precedence over the'
+           + ' project/location from the environment variables.'
+       )
+       self.project = None
+       self.location = None
+     elif (project or location) and env_api_key:
+       # Explicit project/location takes precedence over implicit api_key.
+       logging.info(
+           'The user provided project/location will take precedence over the'
+           + ' Vertex AI API key from the environment variable.'
+       )
+       self.api_key = None
+     elif (env_location or env_project) and env_api_key:
+       # Implicit project/location takes precedence over implicit api_key.
+       logging.info(
+           'The project/location from the environment variables will take'
+           + ' precedence over the API key from the environment variables.'
+       )
+       self.api_key = None
+     if not self.project and not self.api_key:
        self.project = google.auth.default()[1]
-     # Will change this to support EasyGCP in the future.
-     if not self.project or not self.location:
+     if not (self.project or self.location) and not self.api_key:
        raise ValueError(
-           'Project and location must be set when using the Vertex AI API.'
+           'Project/location or API key must be set when using the Vertex AI API.'
+       )
+     if self.api_key:
+       self._http_options['base_url'] = (
+           f'https://aiplatform.googleapis.com/'
+       )
+     else:
+       self._http_options['base_url'] = (
+           f'https://{self.location}-aiplatform.googleapis.com/'
        )
-     self._http_options['base_url'] = (
-         f'https://{self.location}-aiplatform.googleapis.com/'
-     )
      self._http_options['api_version'] = 'v1beta1'
    else: # ML Dev API
-     self.api_key = api_key or os.environ.get('GOOGLE_API_KEY', None)
      if not self.api_key:
        raise ValueError('API key must be set when using the Google AI API.')
      self._http_options['base_url'] = (
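
The net effect of this block is that Vertex AI can now run in an API-key-only "express" mode: explicit initializer arguments win over environment variables, and project/location win over an API key when both come from the environment. A minimal sketch of the two resulting initialization styles, assuming the Client constructor passes these arguments through to ApiClient unchanged:

```python
from google import genai

# Express mode: API key only, no project/location or ADC needed.
# Requests target https://aiplatform.googleapis.com/ with ?key=... on the path
# (see the _build_request hunk further down).
express_client = genai.Client(vertexai=True, api_key='VERTEX_API_KEY')  # placeholder key

# Classic mode: project/location plus application-default credentials;
# the regional endpoint https://{location}-aiplatform.googleapis.com/ is used.
regional_client = genai.Client(
    vertexai=True,
    project='my-project',    # placeholder
    location='us-central1',
)
```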
@@ -191,7 +287,7 @@ class ApiClient:
      self._http_options['api_version'] = 'v1beta'
    # Default options for both clients.
    self._http_options['headers'] = {'Content-Type': 'application/json'}
-   if self.api_key:
+   if self.api_key and not self.vertexai:
      self._http_options['headers']['x-goog-api-key'] = self.api_key
    # Update the http options with the user provided http options.
    if http_options:
@@ -208,7 +304,7 @@ class ApiClient:
      http_method: str,
      path: str,
      request_dict: dict[str, object],
-     http_options: HttpOptions = None,
+     http_options: HttpOptionsDict = None,
  ) -> HttpRequest:
    # Remove all special dict keys such as _url and _query.
    keys_to_delete = [key for key in request_dict.keys() if key.startswith('_')]
@@ -221,8 +317,18 @@ class ApiClient:
      )
    else:
      patched_http_options = self._http_options
-   if self.vertexai and not path.startswith('projects/'):
+   skip_project_and_location_in_path_val = patched_http_options.get(
+       'skip_project_and_location_in_path', False
+   )
+   if (
+       self.vertexai
+       and not path.startswith('projects/')
+       and not skip_project_and_location_in_path_val
+       and not self.api_key
+   ):
      path = f'projects/{self.project}/locations/{self.location}/' + path
+   elif self.vertexai and self.api_key:
+     path = f'{path}?key={self.api_key}'
    url = _join_url_path(
        patched_http_options['base_url'],
        patched_http_options['api_version'] + '/' + path,
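
Two new URL shapes fall out of this hunk: the skip_project_and_location_in_path option suppresses the automatic projects/{project}/locations/{location}/ prefix for callers that already pass a fully qualified path, and express mode appends the API key as a query parameter. Roughly, with placeholder values rather than literal SDK output:

```python
# Illustrative only: the three URL shapes implied by the branch above,
# built here by hand with placeholder values, not by calling the SDK.
base = 'https://us-central1-aiplatform.googleapis.com/v1beta1/'
path = 'publishers/google/models/some-model:generateContent'  # placeholder path

default_url = base + 'projects/my-project/locations/us-central1/' + path
skip_prefix_url = base + path  # skip_project_and_location_in_path=True
express_url = 'https://aiplatform.googleapis.com/v1beta1/' + path + '?key=VERTEX_API_KEY'
```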
@@ -232,6 +338,7 @@ class ApiClient:
        url=url,
        headers=patched_http_options['headers'],
        data=request_dict,
+       timeout=patched_http_options.get('timeout', None),
    )
  
    def _request(
@@ -239,7 +346,7 @@ class ApiClient:
      http_request: HttpRequest,
      stream: bool = False,
  ) -> HttpResponse:
-   if self.vertexai:
+   if self.vertexai and not self.api_key:
      if not self._credentials:
        self._credentials, _ = google.auth.default(
            scopes=["https://www.googleapis.com/auth/cloud-platform"],
@@ -250,10 +357,10 @@ class ApiClient:
          http_request.method.upper(),
          http_request.url,
          headers=http_request.headers,
-         data=json.dumps(http_request.data, cls=RequestJsonEncoder) if http_request.data else None,
-         # TODO: support timeout in RequestOptions so it can be configured
-         # per methods.
-         timeout=None,
+         data=json.dumps(http_request.data, cls=RequestJsonEncoder)
+         if http_request.data
+         else None,
+         timeout=http_request.timeout,
      )
      errors.APIError.raise_for_response(response)
      return HttpResponse(
@@ -275,13 +382,14 @@ class ApiClient:
        data = http_request.data
  
      http_session = requests.Session()
-     request = requests.Request(
+     response = http_session.request(
          method=http_request.method,
          url=http_request.url,
          headers=http_request.headers,
          data=data,
-     ).prepare()
-     response = http_session.send(request, stream=stream)
+         timeout=http_request.timeout,
+         stream=stream,
+     )
      errors.APIError.raise_for_response(response)
      return HttpResponse(
          response.headers, response if stream else [response.text]
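
Because the timeout now flows from http_options through HttpRequest into requests/AuthorizedSession, it follows the requests convention: a single float, or a (connect, read) tuple, matching the Union[float, Tuple[float, float]] annotation above. A small sketch, assuming client-level http_options apply to every request:

```python
from google import genai

# 10 s to establish the connection, 60 s to read the response; a single
# number would set both. Placeholder API key.
client = genai.Client(
    api_key='YOUR_API_KEY',
    http_options={'timeout': (10, 60)},
)
```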
@@ -307,8 +415,10 @@ class ApiClient:
        stream=stream,
    )
  
-   def get_read_only_http_options(self) -> HttpOptions:
-     copied = HttpOptions()
+   def get_read_only_http_options(self) -> HttpOptionsDict:
+     copied = HttpOptionsDict()
+     if isinstance(self._http_options, BaseModel):
+       self._http_options = self._http_options.model_dump()
      copied.update(self._http_options)
      return copied
  
@@ -317,7 +427,7 @@ class ApiClient:
      http_method: str,
      path: str,
      request_dict: dict[str, object],
-     http_options: HttpOptions = None,
+     http_options: HttpOptionsDict = None,
  ):
    http_request = self._build_request(
        http_method, path, request_dict, http_options
@@ -332,7 +442,7 @@ class ApiClient:
      http_method: str,
      path: str,
      request_dict: dict[str, object],
-     http_options: HttpOptions = None,
+     http_options: HttpOptionsDict = None,
  ):
    http_request = self._build_request(
        http_method, path, request_dict, http_options
@@ -349,7 +459,7 @@ class ApiClient:
      http_method: str,
      path: str,
      request_dict: dict[str, object],
-     http_options: HttpOptions = None,
+     http_options: HttpOptionsDict = None,
  ) -> dict[str, object]:
    http_request = self._build_request(
        http_method, path, request_dict, http_options
@@ -365,7 +475,7 @@ class ApiClient:
      http_method: str,
      path: str,
      request_dict: dict[str, object],
-     http_options: HttpOptions = None,
+     http_options: HttpOptionsDict = None,
  ):
    http_request = self._build_request(
        http_method, path, request_dict, http_options
@@ -464,13 +574,12 @@ class ApiClient:
    pass
  
  
+ # TODO(b/389693448): Cleanup datetime hacks.
  class RequestJsonEncoder(json.JSONEncoder):
    """Encode bytes as strings without modifying its content."""
  
    def default(self, o):
-     if isinstance(o, bytes):
-       return o.decode()
-     elif isinstance(o, datetime.datetime):
+     if isinstance(o, datetime.datetime):
        # This Zulu time format is used by the Vertex AI API and the test recorder
        # Using strftime works well, but we want to align with the replay encoder.
        # o.astimezone(datetime.timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
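
After this change the encoder only special-cases datetime values (the bytes branch is gone), serializing them in the Zulu format named in the comment. A rough usage sketch, assuming the rest of default(), which this hunk cuts off, returns the converted string:

```python
import datetime
import json

# Hypothetical direct use of RequestJsonEncoder from google.genai._api_client;
# the part of default() below the visible comment is assumed to emit the
# '%Y-%m-%dT%H:%M:%S.%fZ'-style string described there.
from google.genai._api_client import RequestJsonEncoder

payload = {'expire_time': datetime.datetime(2025, 1, 1, tzinfo=datetime.timezone.utc)}
print(json.dumps(payload, cls=RequestJsonEncoder))
# expected shape: {"expire_time": "2025-01-01T00:00:00.000000Z"}
```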