google-genai 1.53.0__tar.gz → 1.54.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {google_genai-1.53.0/google_genai.egg-info → google_genai-1.54.0}/PKG-INFO +2 -2
  2. {google_genai-1.53.0 → google_genai-1.54.0}/README.md +1 -1
  3. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_api_client.py +6 -6
  4. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/errors.py +16 -1
  5. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/types.py +157 -121
  6. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/version.py +1 -1
  7. {google_genai-1.53.0 → google_genai-1.54.0/google_genai.egg-info}/PKG-INFO +2 -2
  8. {google_genai-1.53.0 → google_genai-1.54.0}/pyproject.toml +1 -1
  9. {google_genai-1.53.0 → google_genai-1.54.0}/LICENSE +0 -0
  10. {google_genai-1.53.0 → google_genai-1.54.0}/MANIFEST.in +0 -0
  11. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/__init__.py +0 -0
  12. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_adapters.py +0 -0
  13. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_api_module.py +0 -0
  14. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_automatic_function_calling_util.py +0 -0
  15. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_base_transformers.py +0 -0
  16. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_base_url.py +0 -0
  17. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_common.py +0 -0
  18. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_extra_utils.py +0 -0
  19. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_live_converters.py +3 -3
  20. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_local_tokenizer_loader.py +0 -0
  21. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_mcp_utils.py +0 -0
  22. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_operations_converters.py +0 -0
  23. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_replay_api_client.py +0 -0
  24. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_test_api_client.py +0 -0
  25. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_tokens_converters.py +0 -0
  26. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/_transformers.py +0 -0
  27. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/batches.py +55 -55
  28. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/caches.py +0 -0
  29. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/chats.py +0 -0
  30. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/client.py +0 -0
  31. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/documents.py +0 -0
  32. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/file_search_stores.py +60 -60
  33. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/files.py +56 -56
  34. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/live.py +0 -0
  35. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/live_music.py +0 -0
  36. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/local_tokenizer.py +0 -0
  37. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/models.py +3 -3
  38. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/operations.py +0 -0
  39. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/pagers.py +0 -0
  40. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/py.typed +0 -0
  41. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/tokens.py +0 -0
  42. {google_genai-1.53.0 → google_genai-1.54.0}/google/genai/tunings.py +57 -57
  43. {google_genai-1.53.0 → google_genai-1.54.0}/google_genai.egg-info/SOURCES.txt +0 -0
  44. {google_genai-1.53.0 → google_genai-1.54.0}/google_genai.egg-info/dependency_links.txt +0 -0
  45. {google_genai-1.53.0 → google_genai-1.54.0}/google_genai.egg-info/requires.txt +0 -0
  46. {google_genai-1.53.0 → google_genai-1.54.0}/google_genai.egg-info/top_level.txt +0 -0
  47. {google_genai-1.53.0 → google_genai-1.54.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.53.0
+ Version: 1.54.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License-Expression: Apache-2.0
@@ -213,7 +213,7 @@ await aclient.aclose()
  ## Client context managers

  By using the sync client context manager, it will close the underlying
- sync client when exiting the with block.
+ sync client when exiting the with block and avoid httpx "client has been closed" error like [issues#1763](https://github.com/googleapis/python-genai/issues/1763).

  ```python
  from google.genai import Client
@@ -177,7 +177,7 @@ await aclient.aclose()
  ## Client context managers

  By using the sync client context manager, it will close the underlying
- sync client when exiting the with block.
+ sync client when exiting the with block and avoid httpx "client has been closed" error like [issues#1763](https://github.com/googleapis/python-genai/issues/1763).

  ```python
  from google.genai import Client
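The README/PKG-INFO change above documents that the sync `Client` can be used as a context manager so the underlying httpx client is closed on exit. A minimal sketch of that usage; the API key and model name below are placeholders, not values from this diff:

```python
from google.genai import Client

# Leaving the `with` block closes the underlying sync httpx client,
# which avoids reusing a stale connection pool and the
# "client has been closed" error referenced in issue #1763 above.
with Client(api_key="YOUR_API_KEY") as client:
    response = client.models.generate_content(
        model="gemini-2.5-flash",  # placeholder model name
        contents="Say hello in one short sentence.",
    )
    print(response.text)
# Create a new Client for any requests made after the block.
```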
@@ -1254,7 +1254,7 @@ class BaseApiClient:
  url=http_request.url,
  headers=http_request.headers,
  data=data,
- timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+ timeout=aiohttp.ClientTimeout(total=http_request.timeout),
  **self._async_client_session_request_args,
  )
  except (
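The one-line change in this hunk, repeated in the remaining `_api_client.py` hunks below, switches the aiohttp timeout from `connect=` to `total=`. The difference, illustrated with plain aiohttp independent of the SDK (the URL is a placeholder):

```python
import asyncio
import aiohttp


async def fetch(url: str) -> int:
    # ClientTimeout(total=...) bounds the whole request: connecting,
    # sending, and reading the response. ClientTimeout(connect=...)
    # only bounds connection establishment, so a slow response body
    # would never hit the configured limit.
    timeout = aiohttp.ClientTimeout(total=30)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url) as response:
            await response.read()
            return response.status


if __name__ == "__main__":
    print(asyncio.run(fetch("https://example.com")))
```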
@@ -1276,7 +1276,7 @@ class BaseApiClient:
  url=http_request.url,
  headers=http_request.headers,
  data=data,
- timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+ timeout=aiohttp.ClientTimeout(total=http_request.timeout),
  **self._async_client_session_request_args,
  )

@@ -1306,7 +1306,7 @@ class BaseApiClient:
  url=http_request.url,
  headers=http_request.headers,
  data=data,
- timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+ timeout=aiohttp.ClientTimeout(total=http_request.timeout),
  **self._async_client_session_request_args,
  )
  await errors.APIError.raise_for_async_response(response)
@@ -1330,7 +1330,7 @@ class BaseApiClient:
  url=http_request.url,
  headers=http_request.headers,
  data=data,
- timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+ timeout=aiohttp.ClientTimeout(total=http_request.timeout),
  **self._async_client_session_request_args,
  )
  await errors.APIError.raise_for_async_response(response)
@@ -1718,7 +1718,7 @@ class BaseApiClient:
  url=upload_url,
  data=file_chunk,
  headers=upload_headers,
- timeout=aiohttp.ClientTimeout(connect=timeout_in_seconds),
+ timeout=aiohttp.ClientTimeout(total=timeout_in_seconds),
  )

  if response.headers.get('X-Goog-Upload-Status'):
@@ -1864,7 +1864,7 @@ class BaseApiClient:
  url=http_request.url,
  headers=http_request.headers,
  data=data,
- timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+ timeout=aiohttp.ClientTimeout(total=http_request.timeout),
  )
  await errors.APIError.raise_for_async_response(response)

@@ -15,7 +15,7 @@

  """Error classes for the GenAI SDK."""

- from typing import Any, Optional, TYPE_CHECKING, Union
+ from typing import Any, Callable, Optional, TYPE_CHECKING, Union
  import httpx
  import json
  from . import _common
@@ -53,6 +53,21 @@ class APIError(Exception):

  super().__init__(f'{self.code} {self.status}. {self.details}')

+ def __reduce__(
+ self,
+ ) -> tuple[Callable[..., 'APIError'], tuple[dict[str, Any]]]:
+ """Returns a tuple that can be used to reconstruct the error for pickling."""
+ state = self.__dict__.copy()
+ return (self.__class__._rebuild, (state,))
+
+ @staticmethod
+ def _rebuild(state: dict[str, Any]) -> 'APIError':
+ """Rebuilds the error from the state."""
+ obj = APIError.__new__(APIError)
+ obj.__dict__.update(state)
+ Exception.__init__(obj, f'{obj.code} {obj.status}. {obj.details}')
+ return obj
+
  def _get_status(self, response_json: Any) -> Any:
  return response_json.get(
  'status', response_json.get('error', {}).get('status', None)
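The `__reduce__`/`_rebuild` pair added above makes `APIError` instances survive pickling (for example when raised in a worker process), even though the class has a custom constructor. A self-contained sketch of the same pattern using a stand-in exception class rather than the SDK's `APIError`, whose constructor signature is not shown in this hunk:

```python
import pickle
from typing import Any, Callable


class DemoError(Exception):
    """Stand-in for an exception whose __init__ takes non-default arguments."""

    def __init__(self, code: int, details: str) -> None:
        self.code = code
        self.details = details
        super().__init__(f"{code}: {details}")

    def __reduce__(self) -> tuple[Callable[..., "DemoError"], tuple[dict[str, Any]]]:
        # Serialize the instance state rather than the constructor arguments;
        # the default reduction would call __init__ with self.args and fail.
        return (self.__class__._rebuild, (self.__dict__.copy(),))

    @staticmethod
    def _rebuild(state: dict[str, Any]) -> "DemoError":
        obj = DemoError.__new__(DemoError)
        obj.__dict__.update(state)
        Exception.__init__(obj, f"{obj.code}: {obj.details}")
        return obj


err = DemoError(429, "quota exceeded")
restored = pickle.loads(pickle.dumps(err))
assert (restored.code, restored.details) == (429, "quota exceeded")
```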
@@ -4368,6 +4368,163 @@ class ToolConfigDict(TypedDict, total=False):
  ToolConfigOrDict = Union[ToolConfig, ToolConfigDict]


+ class ReplicatedVoiceConfig(_common.BaseModel):
+ """ReplicatedVoiceConfig is used to configure replicated voice."""
+
+ mime_type: Optional[str] = Field(
+ default=None,
+ description="""The mime type of the replicated voice.
+ """,
+ )
+ voice_sample_audio: Optional[bytes] = Field(
+ default=None,
+ description="""The sample audio of the replicated voice.
+ """,
+ )
+
+
+ class ReplicatedVoiceConfigDict(TypedDict, total=False):
+ """ReplicatedVoiceConfig is used to configure replicated voice."""
+
+ mime_type: Optional[str]
+ """The mime type of the replicated voice.
+ """
+
+ voice_sample_audio: Optional[bytes]
+ """The sample audio of the replicated voice.
+ """
+
+
+ ReplicatedVoiceConfigOrDict = Union[
+ ReplicatedVoiceConfig, ReplicatedVoiceConfigDict
+ ]
+
+
+ class PrebuiltVoiceConfig(_common.BaseModel):
+ """The configuration for the prebuilt speaker to use."""
+
+ voice_name: Optional[str] = Field(
+ default=None, description="""The name of the preset voice to use."""
+ )
+
+
+ class PrebuiltVoiceConfigDict(TypedDict, total=False):
+ """The configuration for the prebuilt speaker to use."""
+
+ voice_name: Optional[str]
+ """The name of the preset voice to use."""
+
+
+ PrebuiltVoiceConfigOrDict = Union[PrebuiltVoiceConfig, PrebuiltVoiceConfigDict]
+
+
+ class VoiceConfig(_common.BaseModel):
+
+ replicated_voice_config: Optional[ReplicatedVoiceConfig] = Field(
+ default=None,
+ description="""If true, the model will use a replicated voice for the response.""",
+ )
+ prebuilt_voice_config: Optional[PrebuiltVoiceConfig] = Field(
+ default=None,
+ description="""The configuration for the prebuilt voice to use.""",
+ )
+
+
+ class VoiceConfigDict(TypedDict, total=False):
+
+ replicated_voice_config: Optional[ReplicatedVoiceConfigDict]
+ """If true, the model will use a replicated voice for the response."""
+
+ prebuilt_voice_config: Optional[PrebuiltVoiceConfigDict]
+ """The configuration for the prebuilt voice to use."""
+
+
+ VoiceConfigOrDict = Union[VoiceConfig, VoiceConfigDict]
+
+
+ class SpeakerVoiceConfig(_common.BaseModel):
+ """Configuration for a single speaker in a multi speaker setup."""
+
+ speaker: Optional[str] = Field(
+ default=None,
+ description="""Required. The name of the speaker. This should be the same as the speaker name used in the prompt.""",
+ )
+ voice_config: Optional[VoiceConfig] = Field(
+ default=None,
+ description="""Required. The configuration for the voice of this speaker.""",
+ )
+
+
+ class SpeakerVoiceConfigDict(TypedDict, total=False):
+ """Configuration for a single speaker in a multi speaker setup."""
+
+ speaker: Optional[str]
+ """Required. The name of the speaker. This should be the same as the speaker name used in the prompt."""
+
+ voice_config: Optional[VoiceConfigDict]
+ """Required. The configuration for the voice of this speaker."""
+
+
+ SpeakerVoiceConfigOrDict = Union[SpeakerVoiceConfig, SpeakerVoiceConfigDict]
+
+
+ class MultiSpeakerVoiceConfig(_common.BaseModel):
+ """The configuration for the multi-speaker setup.
+
+ This data type is not supported in Vertex AI.
+ """
+
+ speaker_voice_configs: Optional[list[SpeakerVoiceConfig]] = Field(
+ default=None, description="""Required. All the enabled speaker voices."""
+ )
+
+
+ class MultiSpeakerVoiceConfigDict(TypedDict, total=False):
+ """The configuration for the multi-speaker setup.
+
+ This data type is not supported in Vertex AI.
+ """
+
+ speaker_voice_configs: Optional[list[SpeakerVoiceConfigDict]]
+ """Required. All the enabled speaker voices."""
+
+
+ MultiSpeakerVoiceConfigOrDict = Union[
+ MultiSpeakerVoiceConfig, MultiSpeakerVoiceConfigDict
+ ]
+
+
+ class SpeechConfig(_common.BaseModel):
+
+ voice_config: Optional[VoiceConfig] = Field(
+ default=None,
+ description="""Configuration for the voice of the response.""",
+ )
+ language_code: Optional[str] = Field(
+ default=None,
+ description="""Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.""",
+ )
+ multi_speaker_voice_config: Optional[MultiSpeakerVoiceConfig] = Field(
+ default=None,
+ description="""Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. This field is not supported in Vertex AI.""",
+ )
+
+
+ class SpeechConfigDict(TypedDict, total=False):
+
+ voice_config: Optional[VoiceConfigDict]
+ """Configuration for the voice of the response."""
+
+ language_code: Optional[str]
+ """Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization."""
+
+ multi_speaker_voice_config: Optional[MultiSpeakerVoiceConfigDict]
+ """Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. This field is not supported in Vertex AI."""
+
+
+ SpeechConfigOrDict = Union[SpeechConfig, SpeechConfigDict]
+
+
  class AutomaticFunctionCallingConfig(_common.BaseModel):
  """The configuration for automatic function calling."""
@@ -4792,38 +4949,6 @@ class SafetySettingDict(TypedDict, total=False):
  SafetySettingOrDict = Union[SafetySetting, SafetySettingDict]


- class SpeechConfig(_common.BaseModel):
- """The speech generation config."""
-
- language_code: Optional[str] = Field(
- default=None,
- description="""Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization.""",
- )
- voice_config: Optional['VoiceConfig'] = Field(
- default=None, description="""The configuration for the speaker to use."""
- )
- multi_speaker_voice_config: Optional['MultiSpeakerVoiceConfig'] = Field(
- default=None,
- description="""Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. This field is not supported in Vertex AI.""",
- )
-
-
- class SpeechConfigDict(TypedDict, total=False):
- """The speech generation config."""
-
- language_code: Optional[str]
- """Optional. Language code (ISO 639. e.g. en-US) for the speech synthesization."""
-
- voice_config: Optional['VoiceConfigDict']
- """The configuration for the speaker to use."""
-
- multi_speaker_voice_config: Optional['MultiSpeakerVoiceConfigDict']
- """Optional. The configuration for the multi-speaker setup. It is mutually exclusive with the voice_config field. This field is not supported in Vertex AI."""
-
-
- SpeechConfigOrDict = Union[SpeechConfig, SpeechConfigDict]
-
-
  SpeechConfigUnion = Union[str, SpeechConfig]


@@ -9068,95 +9193,6 @@ class DeleteModelResponseDict(TypedDict, total=False):
  DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict]


- class PrebuiltVoiceConfig(_common.BaseModel):
- """The configuration for the prebuilt speaker to use."""
-
- voice_name: Optional[str] = Field(
- default=None, description="""The name of the preset voice to use."""
- )
-
-
- class PrebuiltVoiceConfigDict(TypedDict, total=False):
- """The configuration for the prebuilt speaker to use."""
-
- voice_name: Optional[str]
- """The name of the preset voice to use."""
-
-
- PrebuiltVoiceConfigOrDict = Union[PrebuiltVoiceConfig, PrebuiltVoiceConfigDict]
-
-
- class VoiceConfig(_common.BaseModel):
- """The configuration for the voice to use."""
-
- prebuilt_voice_config: Optional[PrebuiltVoiceConfig] = Field(
- default=None,
- description="""The configuration for the prebuilt voice to use.""",
- )
-
-
- class VoiceConfigDict(TypedDict, total=False):
- """The configuration for the voice to use."""
-
- prebuilt_voice_config: Optional[PrebuiltVoiceConfigDict]
- """The configuration for the prebuilt voice to use."""
-
-
- VoiceConfigOrDict = Union[VoiceConfig, VoiceConfigDict]
-
-
- class SpeakerVoiceConfig(_common.BaseModel):
- """Configuration for a single speaker in a multi speaker setup."""
-
- speaker: Optional[str] = Field(
- default=None,
- description="""Required. The name of the speaker. This should be the same as the speaker name used in the prompt.""",
- )
- voice_config: Optional[VoiceConfig] = Field(
- default=None,
- description="""Required. The configuration for the voice of this speaker.""",
- )
-
-
- class SpeakerVoiceConfigDict(TypedDict, total=False):
- """Configuration for a single speaker in a multi speaker setup."""
-
- speaker: Optional[str]
- """Required. The name of the speaker. This should be the same as the speaker name used in the prompt."""
-
- voice_config: Optional[VoiceConfigDict]
- """Required. The configuration for the voice of this speaker."""
-
-
- SpeakerVoiceConfigOrDict = Union[SpeakerVoiceConfig, SpeakerVoiceConfigDict]
-
-
- class MultiSpeakerVoiceConfig(_common.BaseModel):
- """The configuration for the multi-speaker setup.
-
- This data type is not supported in Vertex AI.
- """
-
- speaker_voice_configs: Optional[list[SpeakerVoiceConfig]] = Field(
- default=None, description="""Required. All the enabled speaker voices."""
- )
-
-
- class MultiSpeakerVoiceConfigDict(TypedDict, total=False):
- """The configuration for the multi-speaker setup.
-
- This data type is not supported in Vertex AI.
- """
-
- speaker_voice_configs: Optional[list[SpeakerVoiceConfigDict]]
- """Required. All the enabled speaker voices."""
-
-
- MultiSpeakerVoiceConfigOrDict = Union[
- MultiSpeakerVoiceConfig, MultiSpeakerVoiceConfigDict
- ]
-
-
  class GenerationConfig(_common.BaseModel):
  """Generation config."""

@@ -13,4 +13,4 @@
  # limitations under the License.
  #

- __version__ = '1.53.0' # x-release-please-version
+ __version__ = '1.54.0' # x-release-please-version
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.53.0
+ Version: 1.54.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License-Expression: Apache-2.0
@@ -213,7 +213,7 @@ await aclient.aclose()
  ## Client context managers

  By using the sync client context manager, it will close the underlying
- sync client when exiting the with block.
+ sync client when exiting the with block and avoid httpx "client has been closed" error like [issues#1763](https://github.com/googleapis/python-genai/issues/1763).

  ```python
  from google.genai import Client
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel", "twine>=6.1.0", "packaging>=24.2", "pkginfo>=

  [project]
  name = "google-genai"
- version = "1.53.0"
+ version = "1.54.0"
  description = "GenAI Python SDK"
  readme = "README.md"
  license = "Apache-2.0"
@@ -1265,12 +1265,12 @@ def _SpeechConfig_to_vertex(
  parent_object: Optional[dict[str, Any]] = None,
  ) -> dict[str, Any]:
  to_object: dict[str, Any] = {}
- if getv(from_object, ['language_code']) is not None:
- setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
-
  if getv(from_object, ['voice_config']) is not None:
  setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config']))

+ if getv(from_object, ['language_code']) is not None:
+ setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
+
  if getv(from_object, ['multi_speaker_voice_config']) is not None:
  raise ValueError(
  'multi_speaker_voice_config parameter is not supported in Vertex AI.'
@@ -1902,34 +1902,6 @@ class Batches(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- def list(
- self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
- ) -> Pager[types.BatchJob]:
- """Lists batch jobs.
-
- Args:
- config (ListBatchJobsConfig): Optional configuration for the list request.
-
- Returns:
- A Pager object that contains one page of batch jobs. When iterating over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
- config = {'page_size': 10}
- for batch_job in client.batches.list(config):
- print(batch_job.name)
- """
-
- list_request = self._list
- return Pager(
- 'batch_jobs',
- list_request,
- self._list(config=config),
- config,
- )
-
  def create(
  self,
  *,
@@ -2025,6 +1997,34 @@ class Batches(_api_module.BaseModule):
  else:
  return self._create_embeddings(model=model, src=src, config=config)

+ def list(
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
+ ) -> Pager[types.BatchJob]:
+ """Lists batch jobs.
+
+ Args:
+ config (ListBatchJobsConfig): Optional configuration for the list request.
+
+ Returns:
+ A Pager object that contains one page of batch jobs. When iterating over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+ config = {'page_size': 10}
+ for batch_job in client.batches.list(config):
+ print(batch_job.name)
+ """
+
+ list_request = self._list
+ return Pager(
+ 'batch_jobs',
+ list_request,
+ self._list(config=config),
+ config,
+ )
+


  class AsyncBatches(_api_module.BaseModule):
@@ -2451,33 +2451,6 @@ class AsyncBatches(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- async def list(
- self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
- ) -> AsyncPager[types.BatchJob]:
- """Lists batch jobs asynchronously.
-
- Args:
- config (ListBatchJobsConfig): Optional configuration for the list request.
-
- Returns:
- A Pager object that contains one page of batch jobs. When iterating over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
- async for batch_job in await client.aio.batches.list():
- print(batch_job.name)
- """
-
- list_request = self._list
- return AsyncPager(
- 'batch_jobs',
- list_request,
- await self._list(config=config),
- config,
- )
-
  async def create(
  self,
  *,
@@ -2578,3 +2551,30 @@ class AsyncBatches(_api_module.BaseModule):
  raise ValueError('Vertex AI does not support batches.create_embeddings.')
  else:
  return await self._create_embeddings(model=model, src=src, config=config)
+
+ async def list(
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
+ ) -> AsyncPager[types.BatchJob]:
+ """Lists batch jobs asynchronously.
+
+ Args:
+ config (ListBatchJobsConfig): Optional configuration for the list request.
+
+ Returns:
+ A Pager object that contains one page of batch jobs. When iterating over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+ async for batch_job in await client.aio.batches.list():
+ print(batch_job.name)
+ """
+
+ list_request = self._list
+ return AsyncPager(
+ 'batch_jobs',
+ list_request,
+ await self._list(config=config),
+ config,
+ )
@@ -695,36 +695,6 @@ class FileSearchStores(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- def list(
- self, *, config: Optional[types.ListFileSearchStoresConfigOrDict] = None
- ) -> Pager[types.FileSearchStore]:
- """Lists FileSearchStores.
-
- Args:
- config (ListFileSearchStoresConfig): Optional configuration for the list
- request.
-
- Returns:
- A Pager object that contains one page of file search stores. When
- iterating over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
- for file_search_store in client.file_search_stores.list():
- print(f"file search store: {file_search_store.name} -
- {file_search_store.display_name}")
- """
-
- list_request = self._list
- return Pager(
- 'file_search_stores',
- list_request,
- self._list(config=config),
- config,
- )
-
  def upload_to_file_search_store(
  self,
  *,
@@ -801,6 +771,36 @@ class FileSearchStores(_api_module.BaseModule):
  response=response_dict, kwargs={}
  )

+ def list(
+ self, *, config: Optional[types.ListFileSearchStoresConfigOrDict] = None
+ ) -> Pager[types.FileSearchStore]:
+ """Lists FileSearchStores.
+
+ Args:
+ config (ListFileSearchStoresConfig): Optional configuration for the list
+ request.
+
+ Returns:
+ A Pager object that contains one page of file search stores. When
+ iterating over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+ for file_search_store in client.file_search_stores.list():
+ print(f"file search store: {file_search_store.name} -
+ {file_search_store.display_name}")
+ """
+
+ list_request = self._list
+ return Pager(
+ 'file_search_stores',
+ list_request,
+ self._list(config=config),
+ config,
+ )
+

  class AsyncFileSearchStores(_api_module.BaseModule):

@@ -1189,36 +1189,6 @@ class AsyncFileSearchStores(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- async def list(
- self, *, config: Optional[types.ListFileSearchStoresConfigOrDict] = None
- ) -> AsyncPager[types.FileSearchStore]:
- """Lists FileSearchStores asynchronously.
-
- Args:
- config (ListFileSearchStoresConfig): Optional parameters for the request,
- such as page_size.
-
- Returns:
- A Pager object that contains one page of FileSearchStores. When iterating
- over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
- async for file_search_store in await client.aio.file_search_stores.list():
- print(f"file search store: {file_search_store.name} -
- {file_search_store.display_name}")
- """
-
- list_request = self._list
- return AsyncPager(
- 'file_search_stores',
- list_request,
- await self._list(config=config),
- config,
- )
-
  async def upload_to_file_search_store(
  self,
  *,
@@ -1294,3 +1264,33 @@ class AsyncFileSearchStores(_api_module.BaseModule):
  return types.UploadToFileSearchStoreOperation._from_response(
  response=response_dict, kwargs={}
  )
+
+ async def list(
+ self, *, config: Optional[types.ListFileSearchStoresConfigOrDict] = None
+ ) -> AsyncPager[types.FileSearchStore]:
+ """Lists FileSearchStores asynchronously.
+
+ Args:
+ config (ListFileSearchStoresConfig): Optional parameters for the request,
+ such as page_size.
+
+ Returns:
+ A Pager object that contains one page of FileSearchStores. When iterating
+ over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+ async for file_search_store in await client.aio.file_search_stores.list():
+ print(f"file search store: {file_search_store.name} -
+ {file_search_store.display_name}")
+ """
+
+ list_request = self._list
+ return AsyncPager(
+ 'file_search_stores',
+ list_request,
+ await self._list(config=config),
+ config,
+ )
@@ -402,34 +402,6 @@ class Files(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- def list(
- self, *, config: Optional[types.ListFilesConfigOrDict] = None
- ) -> Pager[types.File]:
- """Lists all files from the service.
-
- Args:
- config (ListFilesConfig): Optional, configuration for the list method.
-
- Returns:
- A Pager object that contains one page of files. When iterating over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
-
- for file in client.files.list(config={'page_size': 10}):
- print(file.name)
- """
-
- list_request = self._list
- return Pager(
- 'files',
- list_request,
- self._list(config=config),
- config,
- )
-
  def upload(
  self,
  *,
@@ -587,6 +559,34 @@ class Files(_api_module.BaseModule):

  return data

+ def list(
+ self, *, config: Optional[types.ListFilesConfigOrDict] = None
+ ) -> Pager[types.File]:
+ """Lists all files from the service.
+
+ Args:
+ config (ListFilesConfig): Optional, configuration for the list method.
+
+ Returns:
+ A Pager object that contains one page of files. When iterating over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+
+ for file in client.files.list(config={'page_size': 10}):
+ print(file.name)
+ """
+
+ list_request = self._list
+ return Pager(
+ 'files',
+ list_request,
+ self._list(config=config),
+ config,
+ )
+

  class AsyncFiles(_api_module.BaseModule):

@@ -845,34 +845,6 @@ class AsyncFiles(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- async def list(
- self, *, config: Optional[types.ListFilesConfigOrDict] = None
- ) -> AsyncPager[types.File]:
- """Lists all files from the service asynchronously.
-
- Args:
- config (ListFilesConfig): Optional, configuration for the list method.
-
- Returns:
- A Pager object that contains one page of files. When iterating over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
-
- async for file in await client.aio.files.list(config={'page_size': 10}):
- print(file.name)
- """
-
- list_request = self._list
- return AsyncPager(
- 'files',
- list_request,
- await self._list(config=config),
- config,
- )
-
  async def upload(
  self,
  *,
@@ -1019,3 +991,31 @@ class AsyncFiles(_api_module.BaseModule):
  )

  return data
+
+ async def list(
+ self, *, config: Optional[types.ListFilesConfigOrDict] = None
+ ) -> AsyncPager[types.File]:
+ """Lists all files from the service asynchronously.
+
+ Args:
+ config (ListFilesConfig): Optional, configuration for the list method.
+
+ Returns:
+ A Pager object that contains one page of files. When iterating over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+
+ async for file in await client.aio.files.list(config={'page_size': 10}):
+ print(file.name)
+ """
+
+ list_request = self._list
+ return AsyncPager(
+ 'files',
+ list_request,
+ await self._list(config=config),
+ config,
+ )
@@ -3420,12 +3420,12 @@ def _SpeechConfig_to_vertex(
  parent_object: Optional[dict[str, Any]] = None,
  ) -> dict[str, Any]:
  to_object: dict[str, Any] = {}
- if getv(from_object, ['language_code']) is not None:
- setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
-
  if getv(from_object, ['voice_config']) is not None:
  setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config']))

+ if getv(from_object, ['language_code']) is not None:
+ setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
+
  if getv(from_object, ['multi_speaker_voice_config']) is not None:
  raise ValueError(
  'multi_speaker_voice_config parameter is not supported in Vertex AI.'
@@ -867,12 +867,12 @@ def _SpeechConfig_to_vertex(
  root_object: Optional[Union[dict[str, Any], object]] = None,
  ) -> dict[str, Any]:
  to_object: dict[str, Any] = {}
- if getv(from_object, ['language_code']) is not None:
- setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
-
  if getv(from_object, ['voice_config']) is not None:
  setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config']))

+ if getv(from_object, ['language_code']) is not None:
+ setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
+
  if getv(from_object, ['multi_speaker_voice_config']) is not None:
  raise ValueError(
  'multi_speaker_voice_config parameter is not supported in Vertex AI.'
@@ -1555,33 +1555,6 @@ class Tunings(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- def list(
- self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
- ) -> Pager[types.TuningJob]:
- """Lists `TuningJob` objects.
-
- Args:
- config: The configuration for the list request.
-
- Returns:
- A Pager object that contains one page of tuning jobs. When iterating over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
- for tuning_job in client.tunings.list():
- print(tuning_job.name)
- """
-
- list_request = self._list
- return Pager(
- 'tuning_jobs',
- list_request,
- self._list(config=config),
- config,
- )
-
  def get(
  self,
  *,
@@ -1683,6 +1656,33 @@ class Tunings(_api_module.BaseModule):
  )
  return tuning_job

+ def list(
+ self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
+ ) -> Pager[types.TuningJob]:
+ """Lists `TuningJob` objects.
+
+ Args:
+ config: The configuration for the list request.
+
+ Returns:
+ A Pager object that contains one page of tuning jobs. When iterating over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+ for tuning_job in client.tunings.list():
+ print(tuning_job.name)
+ """
+
+ list_request = self._list
+ return Pager(
+ 'tuning_jobs',
+ list_request,
+ self._list(config=config),
+ config,
+ )
+

  class AsyncTunings(_api_module.BaseModule):

@@ -2049,33 +2049,6 @@ class AsyncTunings(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- async def list(
- self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
- ) -> AsyncPager[types.TuningJob]:
- """Lists `TuningJob` objects asynchronously.
-
- Args:
- config: The configuration for the list request.
-
- Returns:
- A Pager object that contains one page of tuning jobs. When iterating over
- the pager, it automatically fetches the next page if there are more.
-
- Usage:
-
- .. code-block:: python
- async for tuning_job in await client.aio.tunings.list():
- print(tuning_job.name)
- """
-
- list_request = self._list
- return AsyncPager(
- 'tuning_jobs',
- list_request,
- await self._list(config=config),
- config,
- )
-
  async def get(
  self,
  *,
@@ -2171,6 +2144,33 @@ class AsyncTunings(_api_module.BaseModule):
  )
  return tuning_job

+ async def list(
+ self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
+ ) -> AsyncPager[types.TuningJob]:
+ """Lists `TuningJob` objects asynchronously.
+
+ Args:
+ config: The configuration for the list request.
+
+ Returns:
+ A Pager object that contains one page of tuning jobs. When iterating over
+ the pager, it automatically fetches the next page if there are more.
+
+ Usage:
+
+ .. code-block:: python
+ async for tuning_job in await client.aio.tunings.list():
+ print(tuning_job.name)
+ """
+
+ list_request = self._list
+ return AsyncPager(
+ 'tuning_jobs',
+ list_request,
+ await self._list(config=config),
+ config,
+ )
+

  class _IpythonUtils:
  """Temporary class to hold the IPython related functions."""