google-genai 1.51.0__tar.gz → 1.53.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. {google_genai-1.51.0/google_genai.egg-info → google_genai-1.53.0}/PKG-INFO +51 -30
  2. {google_genai-1.51.0 → google_genai-1.53.0}/README.md +48 -27
  3. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_api_client.py +65 -7
  4. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_common.py +1 -0
  5. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/batches.py +55 -59
  6. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/caches.py +40 -20
  7. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/documents.py +3 -23
  8. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/file_search_stores.py +60 -76
  9. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/files.py +56 -54
  10. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/live.py +8 -1
  11. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/tunings.py +100 -22
  12. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/types.py +58 -6
  13. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/version.py +1 -1
  14. {google_genai-1.51.0 → google_genai-1.53.0/google_genai.egg-info}/PKG-INFO +51 -30
  15. {google_genai-1.51.0 → google_genai-1.53.0}/google_genai.egg-info/requires.txt +2 -2
  16. {google_genai-1.51.0 → google_genai-1.53.0}/pyproject.toml +12 -8
  17. {google_genai-1.51.0 → google_genai-1.53.0}/LICENSE +0 -0
  18. {google_genai-1.51.0 → google_genai-1.53.0}/MANIFEST.in +0 -0
  19. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/__init__.py +0 -0
  20. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_adapters.py +0 -0
  21. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_api_module.py +0 -0
  22. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_automatic_function_calling_util.py +0 -0
  23. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_base_transformers.py +0 -0
  24. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_base_url.py +0 -0
  25. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_extra_utils.py +0 -0
  26. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_live_converters.py +0 -0
  27. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_local_tokenizer_loader.py +0 -0
  28. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_mcp_utils.py +0 -0
  29. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_operations_converters.py +0 -0
  30. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_replay_api_client.py +0 -0
  31. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_test_api_client.py +0 -0
  32. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_tokens_converters.py +0 -0
  33. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/_transformers.py +0 -0
  34. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/chats.py +0 -0
  35. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/client.py +0 -0
  36. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/errors.py +0 -0
  37. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/live_music.py +0 -0
  38. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/local_tokenizer.py +0 -0
  39. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/models.py +0 -0
  40. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/operations.py +0 -0
  41. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/pagers.py +0 -0
  42. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/py.typed +0 -0
  43. {google_genai-1.51.0 → google_genai-1.53.0}/google/genai/tokens.py +0 -0
  44. {google_genai-1.51.0 → google_genai-1.53.0}/google_genai.egg-info/SOURCES.txt +0 -0
  45. {google_genai-1.51.0 → google_genai-1.53.0}/google_genai.egg-info/dependency_links.txt +0 -0
  46. {google_genai-1.51.0 → google_genai-1.53.0}/google_genai.egg-info/top_level.txt +0 -0
  47. {google_genai-1.51.0 → google_genai-1.53.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: google-genai
3
- Version: 1.51.0
3
+ Version: 1.53.0
4
4
  Summary: GenAI Python SDK
5
5
  Author-email: Google LLC <googleapis-packages@google.com>
6
6
  License-Expression: Apache-2.0
@@ -20,7 +20,7 @@ Requires-Python: >=3.10
20
20
  Description-Content-Type: text/markdown
21
21
  License-File: LICENSE
22
22
  Requires-Dist: anyio<5.0.0,>=4.8.0
23
- Requires-Dist: google-auth<3.0.0,>=2.14.1
23
+ Requires-Dist: google-auth[requests]<3.0.0,>=2.14.1
24
24
  Requires-Dist: httpx<1.0.0,>=0.28.1
25
25
  Requires-Dist: pydantic<3.0.0,>=2.9.0
26
26
  Requires-Dist: requests<3.0.0,>=2.28.1
@@ -28,7 +28,7 @@ Requires-Dist: tenacity<9.2.0,>=8.2.3
28
28
  Requires-Dist: websockets<15.1.0,>=13.0.0
29
29
  Requires-Dist: typing-extensions<5.0.0,>=4.11.0
30
30
  Provides-Extra: aiohttp
31
- Requires-Dist: aiohttp<4.0.0; extra == "aiohttp"
31
+ Requires-Dist: aiohttp<3.13.3; extra == "aiohttp"
32
32
  Provides-Extra: local-tokenizer
33
33
  Requires-Dist: sentencepiece>=0.2.0; extra == "local-tokenizer"
34
34
  Requires-Dist: protobuf; extra == "local-tokenizer"
@@ -51,6 +51,16 @@ Google's generative models into their Python applications. It supports the
51
51
  [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)
52
52
  APIs.
53
53
 
54
+ ## Code Generation
55
+
56
+ Generative models are often unaware of recent API and SDK updates and may suggest outdated or legacy code.
57
+
58
+ We recommend using our Code Generation instructions [codegen_instructions.md](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md) when generating Google Gen AI SDK code to guide your model towards using the more recent SDK features.
59
+
60
+ Copy and paste the instructions from [this file](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md)
61
+ into your development environment to provide the model with the necessary
62
+ context.
63
+
54
64
  ## Installation
55
65
 
56
66
  ```sh
@@ -91,6 +101,44 @@ client = genai.Client(
91
101
  )
92
102
  ```
93
103
 
104
+ ## Using types
105
+
106
+ All API methods support Pydantic types and dictionaries, which you can access
107
+ from `google.genai.types`. You can import the types module with the following:
108
+
109
+ ```python
110
+ from google.genai import types
111
+ ```
112
+
113
+ Below is an example `generate_content()` call using types from the types module:
114
+
115
+ ```python
116
+ response = client.models.generate_content(
117
+ model='gemini-2.0-flash-001',
118
+ contents=types.Part.from_text(text='Why is the sky blue?'),
119
+ config=types.GenerateContentConfig(
120
+ temperature=0,
121
+ top_p=0.95,
122
+ top_k=20,
123
+ ),
124
+ )
125
+ ```
126
+
127
+ Alternatively, you can accomplish the same request using dictionaries instead of
128
+ types:
129
+
130
+ ```python
131
+ response = client.models.generate_content(
132
+ model='gemini-2.0-flash-001',
133
+ contents={'text': 'Why is the sky blue?'},
134
+ config={
135
+ 'temperature': 0,
136
+ 'top_p': 0.95,
137
+ 'top_k': 20,
138
+ },
139
+ )
140
+ ```
141
+
94
142
  **(Optional) Using environment variables:**
95
143
 
96
144
  You can create a client by configuring the necessary environment variables.
@@ -583,33 +631,6 @@ response = client.models.generate_content(
583
631
  print(response.text)
584
632
  ```
585
633
 
586
- ### Typed Config
587
-
588
- All API methods support Pydantic types for parameters as well as
589
- dictionaries. You can get the type from `google.genai.types`.
590
-
591
- ```python
592
- from google.genai import types
593
-
594
- response = client.models.generate_content(
595
- model='gemini-2.0-flash-001',
596
- contents=types.Part.from_text(text='Why is the sky blue?'),
597
- config=types.GenerateContentConfig(
598
- temperature=0,
599
- top_p=0.95,
600
- top_k=20,
601
- candidate_count=1,
602
- seed=5,
603
- max_output_tokens=100,
604
- stop_sequences=['STOP!'],
605
- presence_penalty=0.0,
606
- frequency_penalty=0.0,
607
- ),
608
- )
609
-
610
- print(response.text)
611
- ```
612
-
613
634
  ### List Base Models
614
635
 
615
636
  To retrieve tuned models, see [list tuned models](#list-tuned-models).
@@ -15,6 +15,16 @@ Google's generative models into their Python applications. It supports the
15
15
  [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)
16
16
  APIs.
17
17
 
18
+ ## Code Generation
19
+
20
+ Generative models are often unaware of recent API and SDK updates and may suggest outdated or legacy code.
21
+
22
+ We recommend using our Code Generation instructions [codegen_instructions.md](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md) when generating Google Gen AI SDK code to guide your model towards using the more recent SDK features.
23
+
24
+ Copy and paste the instructions from [this file](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md)
25
+ into your development environment to provide the model with the necessary
26
+ context.
27
+
18
28
  ## Installation
19
29
 
20
30
  ```sh
@@ -55,6 +65,44 @@ client = genai.Client(
55
65
  )
56
66
  ```
57
67
 
68
+ ## Using types
69
+
70
+ All API methods support Pydantic types and dictionaries, which you can access
71
+ from `google.genai.types`. You can import the types module with the following:
72
+
73
+ ```python
74
+ from google.genai import types
75
+ ```
76
+
77
+ Below is an example `generate_content()` call using types from the types module:
78
+
79
+ ```python
80
+ response = client.models.generate_content(
81
+ model='gemini-2.0-flash-001',
82
+ contents=types.Part.from_text(text='Why is the sky blue?'),
83
+ config=types.GenerateContentConfig(
84
+ temperature=0,
85
+ top_p=0.95,
86
+ top_k=20,
87
+ ),
88
+ )
89
+ ```
90
+
91
+ Alternatively, you can accomplish the same request using dictionaries instead of
92
+ types:
93
+
94
+ ```python
95
+ response = client.models.generate_content(
96
+ model='gemini-2.0-flash-001',
97
+ contents={'text': 'Why is the sky blue?'},
98
+ config={
99
+ 'temperature': 0,
100
+ 'top_p': 0.95,
101
+ 'top_k': 20,
102
+ },
103
+ )
104
+ ```
105
+
58
106
  **(Optional) Using environment variables:**
59
107
 
60
108
  You can create a client by configuring the necessary environment variables.
@@ -547,33 +595,6 @@ response = client.models.generate_content(
547
595
  print(response.text)
548
596
  ```
549
597
 
550
- ### Typed Config
551
-
552
- All API methods support Pydantic types for parameters as well as
553
- dictionaries. You can get the type from `google.genai.types`.
554
-
555
- ```python
556
- from google.genai import types
557
-
558
- response = client.models.generate_content(
559
- model='gemini-2.0-flash-001',
560
- contents=types.Part.from_text(text='Why is the sky blue?'),
561
- config=types.GenerateContentConfig(
562
- temperature=0,
563
- top_p=0.95,
564
- top_k=20,
565
- candidate_count=1,
566
- seed=5,
567
- max_output_tokens=100,
568
- stop_sequences=['STOP!'],
569
- presence_penalty=0.0,
570
- frequency_penalty=0.0,
571
- ),
572
- )
573
-
574
- print(response.text)
575
- ```
576
-
577
598
  ### List Base Models
578
599
 
579
600
  To retrieve tuned models, see [list tuned models](#list-tuned-models).
@@ -37,6 +37,7 @@ import time
37
37
  from typing import Any, AsyncIterator, Iterator, Optional, Tuple, TYPE_CHECKING, Union
38
38
  from urllib.parse import urlparse
39
39
  from urllib.parse import urlunparse
40
+ import warnings
40
41
 
41
42
  import anyio
42
43
  import certifi
@@ -55,6 +56,7 @@ from .types import HttpOptions
55
56
  from .types import HttpOptionsOrDict
56
57
  from .types import HttpResponse as SdkHttpResponse
57
58
  from .types import HttpRetryOptions
59
+ from .types import ResourceScope
58
60
 
59
61
 
60
62
  try:
@@ -578,6 +580,12 @@ class BaseApiClient:
578
580
  elif isinstance(http_options, HttpOptions):
579
581
  validated_http_options = http_options
580
582
 
583
+ if validated_http_options.base_url_resource_scope and not validated_http_options.base_url:
584
+ # base_url_resource_scope is only valid when base_url is set.
585
+ raise ValueError(
586
+ 'base_url must be set when base_url_resource_scope is set.'
587
+ )
588
+
581
589
  # Retrieve implicitly set values from the environment.
582
590
  env_project = os.environ.get('GOOGLE_CLOUD_PROJECT', None)
583
591
  env_location = os.environ.get('GOOGLE_CLOUD_LOCATION', None)
@@ -727,10 +735,44 @@ class BaseApiClient:
727
735
 
728
736
  async def _get_aiohttp_session(self) -> 'aiohttp.ClientSession':
729
737
  """Returns the aiohttp client session."""
730
- if self._aiohttp_session is None or self._aiohttp_session.closed:
738
+ if (
739
+ self._aiohttp_session is None
740
+ or self._aiohttp_session.closed
741
+ or self._aiohttp_session._loop.is_closed() # pylint: disable=protected-access
742
+ ):
731
743
  # Initialize the aiohttp client session if it's not set up or closed.
732
- self._aiohttp_session = aiohttp.ClientSession(
733
- connector=aiohttp.TCPConnector(limit=0),
744
+ class AiohttpClientSession(aiohttp.ClientSession): # type: ignore[misc]
745
+
746
+ def __del__(self, _warnings: Any = warnings) -> None:
747
+ if not self.closed:
748
+ context = {
749
+ 'client_session': self,
750
+ 'message': 'Unclosed client session',
751
+ }
752
+ if self._source_traceback is not None:
753
+ context['source_traceback'] = self._source_traceback
754
+ # Remove this self._loop.call_exception_handler(context)
755
+
756
+ class AiohttpTCPConnector(aiohttp.TCPConnector): # type: ignore[misc]
757
+
758
+ def __del__(self, _warnings: Any = warnings) -> None:
759
+ if self._closed:
760
+ return
761
+ if not self._conns:
762
+ return
763
+ conns = [repr(c) for c in self._conns.values()]
764
+ # After v3.13.2, it may change to self._close_immediately()
765
+ self._close()
766
+ context = {
767
+ 'connector': self,
768
+ 'connections': conns,
769
+ 'message': 'Unclosed connector',
770
+ }
771
+ if self._source_traceback is not None:
772
+ context['source_traceback'] = self._source_traceback
773
+ # Remove this self._loop.call_exception_handler(context)
774
+ self._aiohttp_session = AiohttpClientSession(
775
+ connector=AiohttpTCPConnector(limit=0),
734
776
  trust_env=True,
735
777
  read_bufsize=READ_BUFFER_SIZE,
736
778
  )
@@ -1044,6 +1086,11 @@ class BaseApiClient:
1044
1086
  and not path.startswith('projects/')
1045
1087
  and not query_vertex_base_models
1046
1088
  and (self.project or self.location)
1089
+ and not (
1090
+ self.custom_base_url
1091
+ and patched_http_options.base_url_resource_scope
1092
+ == ResourceScope.COLLECTION
1093
+ )
1047
1094
  ):
1048
1095
  path = f'projects/{self.project}/locations/{self.location}/' + path
1049
1096
 
@@ -1073,10 +1120,21 @@ class BaseApiClient:
1073
1120
  or (self.project and self.location)
1074
1121
  or self.api_key
1075
1122
  ):
1076
- url = join_url_path(
1077
- base_url,
1078
- versioned_path,
1079
- )
1123
+ if (
1124
+ patched_http_options.base_url_resource_scope
1125
+ == ResourceScope.COLLECTION
1126
+ ):
1127
+ url = join_url_path(base_url, path)
1128
+ else:
1129
+ url = join_url_path(
1130
+ base_url,
1131
+ versioned_path,
1132
+ )
1133
+ elif(
1134
+ self.custom_base_url
1135
+ and patched_http_options.base_url_resource_scope == ResourceScope.COLLECTION
1136
+ ):
1137
+ url = join_url_path(base_url, path)
1080
1138
 
1081
1139
  if self.api_key and self.api_key.startswith('auth_tokens/'):
1082
1140
  raise EphemeralTokenAPIKeyError(
@@ -588,6 +588,7 @@ class BaseModel(pydantic.BaseModel):
588
588
  expected_type = non_none_types[0]
589
589
 
590
590
  if (isinstance(expected_type, type) and
591
+ get_origin(expected_type) is None and
591
592
  issubclass(expected_type, pydantic.BaseModel) and
592
593
  isinstance(value, pydantic.BaseModel) and
593
594
  not isinstance(value, expected_type)):
@@ -1902,6 +1902,34 @@ class Batches(_api_module.BaseModule):
1902
1902
  self._api_client._verify_response(return_value)
1903
1903
  return return_value
1904
1904
 
1905
+ def list(
1906
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
1907
+ ) -> Pager[types.BatchJob]:
1908
+ """Lists batch jobs.
1909
+
1910
+ Args:
1911
+ config (ListBatchJobsConfig): Optional configuration for the list request.
1912
+
1913
+ Returns:
1914
+ A Pager object that contains one page of batch jobs. When iterating over
1915
+ the pager, it automatically fetches the next page if there are more.
1916
+
1917
+ Usage:
1918
+
1919
+ .. code-block:: python
1920
+ config = {'page_size': 10}
1921
+ for batch_job in client.batches.list(config):
1922
+ print(batch_job.name)
1923
+ """
1924
+
1925
+ list_request = self._list
1926
+ return Pager(
1927
+ 'batch_jobs',
1928
+ list_request,
1929
+ self._list(config=config),
1930
+ config,
1931
+ )
1932
+
1905
1933
  def create(
1906
1934
  self,
1907
1935
  *,
@@ -1997,35 +2025,6 @@ class Batches(_api_module.BaseModule):
1997
2025
  else:
1998
2026
  return self._create_embeddings(model=model, src=src, config=config)
1999
2027
 
2000
- def list(
2001
- self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
2002
- ) -> Pager[types.BatchJob]:
2003
- """Lists batch jobs.
2004
-
2005
- Args:
2006
- config (ListBatchJobsConfig): Optional configuration for the list request.
2007
-
2008
- Returns:
2009
- A Pager object that contains one page of batch jobs. When iterating over
2010
- the pager, it automatically fetches the next page if there are more.
2011
-
2012
- Usage:
2013
-
2014
- .. code-block:: python
2015
-
2016
- batch_jobs = client.batches.list(config={"page_size": 10})
2017
- for batch_job in batch_jobs:
2018
- print(f"Batch job: {batch_job.name}, state {batch_job.state}")
2019
- """
2020
- if config is None:
2021
- config = types.ListBatchJobsConfig()
2022
- return Pager(
2023
- 'batch_jobs',
2024
- self._list,
2025
- self._list(config=config),
2026
- config,
2027
- )
2028
-
2029
2028
 
2030
2029
  class AsyncBatches(_api_module.BaseModule):
2031
2030
 
@@ -2452,6 +2451,33 @@ class AsyncBatches(_api_module.BaseModule):
2452
2451
  self._api_client._verify_response(return_value)
2453
2452
  return return_value
2454
2453
 
2454
+ async def list(
2455
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
2456
+ ) -> AsyncPager[types.BatchJob]:
2457
+ """Lists batch jobs asynchronously.
2458
+
2459
+ Args:
2460
+ config (ListBatchJobsConfig): Optional configuration for the list request.
2461
+
2462
+ Returns:
2463
+ A Pager object that contains one page of batch jobs. When iterating over
2464
+ the pager, it automatically fetches the next page if there are more.
2465
+
2466
+ Usage:
2467
+
2468
+ .. code-block:: python
2469
+ async for batch_job in await client.aio.batches.list():
2470
+ print(batch_job.name)
2471
+ """
2472
+
2473
+ list_request = self._list
2474
+ return AsyncPager(
2475
+ 'batch_jobs',
2476
+ list_request,
2477
+ await self._list(config=config),
2478
+ config,
2479
+ )
2480
+
2455
2481
  async def create(
2456
2482
  self,
2457
2483
  *,
@@ -2552,33 +2578,3 @@ class AsyncBatches(_api_module.BaseModule):
2552
2578
  raise ValueError('Vertex AI does not support batches.create_embeddings.')
2553
2579
  else:
2554
2580
  return await self._create_embeddings(model=model, src=src, config=config)
2555
-
2556
- async def list(
2557
- self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
2558
- ) -> AsyncPager[types.BatchJob]:
2559
- """Lists batch jobs asynchronously.
2560
-
2561
- Args:
2562
- config (ListBatchJobsConfig): Optional configuration for the list request.
2563
-
2564
- Returns:
2565
- A Pager object that contains one page of batch jobs. When iterating over
2566
- the pager, it automatically fetches the next page if there are more.
2567
-
2568
- Usage:
2569
-
2570
- .. code-block:: python
2571
-
2572
- batch_jobs = await client.aio.batches.list(config={'page_size': 5})
2573
- print(f"current page: {batch_jobs.page}")
2574
- await batch_jobs_pager.next_page()
2575
- print(f"next page: {batch_jobs_pager.page}")
2576
- """
2577
- if config is None:
2578
- config = types.ListBatchJobsConfig()
2579
- return AsyncPager(
2580
- 'batch_jobs',
2581
- self._list,
2582
- await self._list(config=config),
2583
- config,
2584
- )
@@ -1129,15 +1129,6 @@ class Caches(_api_module.BaseModule):
1129
1129
  def _list(
1130
1130
  self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None
1131
1131
  ) -> types.ListCachedContentsResponse:
1132
- """Lists cached content configurations.
1133
-
1134
- .. code-block:: python
1135
-
1136
- cached_contents = client.caches.list(config={'page_size': 2})
1137
- for cached_content in cached_contents:
1138
- print(cached_content)
1139
- """
1140
-
1141
1132
  parameter_model = types._ListCachedContentsParameters(
1142
1133
  config=config,
1143
1134
  )
@@ -1196,9 +1187,28 @@ class Caches(_api_module.BaseModule):
1196
1187
  def list(
1197
1188
  self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None
1198
1189
  ) -> Pager[types.CachedContent]:
1190
+ """Lists cached contents.
1191
+
1192
+ Args:
1193
+ config (ListCachedContentsConfig): Optional configuration for the list
1194
+ request.
1195
+
1196
+ Returns:
1197
+ A Pager object that contains one page of cached contents. When iterating
1198
+ over
1199
+ the pager, it automatically fetches the next page if there are more.
1200
+
1201
+ Usage:
1202
+
1203
+ .. code-block:: python
1204
+ for cached_content in client.caches.list():
1205
+ print(cached_content.name)
1206
+ """
1207
+
1208
+ list_request = self._list
1199
1209
  return Pager(
1200
1210
  'cached_contents',
1201
- self._list,
1211
+ list_request,
1202
1212
  self._list(config=config),
1203
1213
  config,
1204
1214
  )
@@ -1505,15 +1515,6 @@ class AsyncCaches(_api_module.BaseModule):
1505
1515
  async def _list(
1506
1516
  self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None
1507
1517
  ) -> types.ListCachedContentsResponse:
1508
- """Lists cached content configurations.
1509
-
1510
- .. code-block:: python
1511
-
1512
- cached_contents = await client.aio.caches.list(config={'page_size': 2})
1513
- async for cached_content in cached_contents:
1514
- print(cached_content)
1515
- """
1516
-
1517
1518
  parameter_model = types._ListCachedContentsParameters(
1518
1519
  config=config,
1519
1520
  )
@@ -1574,9 +1575,28 @@ class AsyncCaches(_api_module.BaseModule):
1574
1575
  async def list(
1575
1576
  self, *, config: Optional[types.ListCachedContentsConfigOrDict] = None
1576
1577
  ) -> AsyncPager[types.CachedContent]:
1578
+ """Lists cached contents asynchronously.
1579
+
1580
+ Args:
1581
+ config (ListCachedContentsConfig): Optional configuration for the list
1582
+ request.
1583
+
1584
+ Returns:
1585
+ A Pager object that contains one page of cached contents. When iterating
1586
+ over
1587
+ the pager, it automatically fetches the next page if there are more.
1588
+
1589
+ Usage:
1590
+
1591
+ .. code-block:: python
1592
+ async for cached_content in await client.aio.caches.list():
1593
+ print(cached_content.name)
1594
+ """
1595
+
1596
+ list_request = self._list
1577
1597
  return AsyncPager(
1578
1598
  'cached_contents',
1579
- self._list,
1599
+ list_request,
1580
1600
  await self._list(config=config),
1581
1601
  config,
1582
1602
  )
@@ -249,17 +249,6 @@ class Documents(_api_module.BaseModule):
249
249
  parent: str,
250
250
  config: Optional[types.ListDocumentsConfigOrDict] = None,
251
251
  ) -> types.ListDocumentsResponse:
252
- """Lists all Documents in a FileSearchStore.
253
-
254
- Args:
255
- parent (str): The name of the FileSearchStore containing the Documents.
256
- config (ListDocumentsConfig | None): Optional parameters for the request,
257
- such as page_size.
258
-
259
- Returns:
260
- ListDocumentsResponse: A paginated list of Documents.
261
- """
262
-
263
252
  parameter_model = types._ListDocumentsParameters(
264
253
  parent=parent,
265
254
  config=config,
@@ -328,6 +317,7 @@ class Documents(_api_module.BaseModule):
328
317
  for document in client.documents.list(parent='rag_store_name'):
329
318
  print(f"document: {document.name} - {document.display_name}")
330
319
  """
320
+
331
321
  list_request = partial(self._list, parent=parent)
332
322
  return Pager(
333
323
  'documents',
@@ -461,17 +451,6 @@ class AsyncDocuments(_api_module.BaseModule):
461
451
  parent: str,
462
452
  config: Optional[types.ListDocumentsConfigOrDict] = None,
463
453
  ) -> types.ListDocumentsResponse:
464
- """Lists all Documents in a FileSearchStore.
465
-
466
- Args:
467
- parent (str): The name of the FileSearchStore containing the Documents.
468
- config (ListDocumentsConfig | None): Optional parameters for the request,
469
- such as page_size.
470
-
471
- Returns:
472
- ListDocumentsResponse: A paginated list of Documents.
473
- """
474
-
475
454
  parameter_model = types._ListDocumentsParameters(
476
455
  parent=parent,
477
456
  config=config,
@@ -540,9 +519,10 @@ class AsyncDocuments(_api_module.BaseModule):
540
519
  Usage:
541
520
  .. code-block:: python
542
521
  async for document in await
543
- client.documents.list(parent='rag_store_name'):
522
+ client.aio.documents.list(parent='rag_store_name'):
544
523
  print(f"document: {document.name} - {document.display_name}")
545
524
  """
525
+
546
526
  list_request = partial(self._list, parent=parent)
547
527
  return AsyncPager(
548
528
  'documents',