google-genai 0.0.1__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/__init__.py CHANGED
@@ -17,4 +17,6 @@
17
17
 
18
18
  from .client import Client
19
19
 
20
+ __version__ = '0.2.0'
21
+
20
22
  __all__ = ['Client']
@@ -24,7 +24,7 @@ import json
24
24
  import os
25
25
  import sys
26
26
  from typing import Any, Optional, TypedDict, Union
27
- import urllib
27
+ from urllib.parse import urlparse, urlunparse
28
28
 
29
29
  import google.auth
30
30
  import google.auth.credentials
@@ -51,7 +51,7 @@ class HttpOptions(TypedDict):
51
51
  def _append_library_version_headers(headers: dict[str, str]) -> None:
52
52
  """Appends the telemetry header to the headers dict."""
53
53
  # TODO: Automate revisions to the SDK library version.
54
- library_label = 'google-genai-sdk/0.1.0'
54
+ library_label = f'google-genai-sdk/0.2.0'
55
55
  language_label = 'gl-python/' + sys.version.split()[0]
56
56
  version_header_value = f'{library_label} {language_label}'
57
57
  if (
@@ -89,6 +89,13 @@ def _patch_http_options(
89
89
  return copy_option
90
90
 
91
91
 
92
+ def _join_url_path(base_url: str, path: str) -> str:
93
+ parsed_base = urlparse(base_url)
94
+ base_path = parsed_base.path[:-1] if parsed_base.path.endswith('/') else parsed_base.path
95
+ path = path[1:] if path.startswith('/') else path
96
+ return urlunparse(parsed_base._replace(path=base_path + '/' + path))
97
+
98
+
92
99
  @dataclass
93
100
  class HttpRequest:
94
101
  headers: dict[str, str]
@@ -216,9 +223,10 @@ class ApiClient:
216
223
  patched_http_options = self._http_options
217
224
  if self.vertexai and not path.startswith('projects/'):
218
225
  path = f'projects/{self.project}/locations/{self.location}/' + path
219
- url = urllib.parse.urljoin(
226
+ url = _join_url_path(
220
227
  patched_http_options['base_url'],
221
- patched_http_options['api_version'] + '/' + path)
228
+ patched_http_options['api_version'] + '/' + path,
229
+ )
222
230
  return HttpRequest(
223
231
  method=http_method,
224
232
  url=url,
@@ -265,13 +273,13 @@ class ApiClient:
265
273
  data = http_request.data
266
274
 
267
275
  http_session = requests.Session()
268
- async_request = requests.Request(
276
+ request = requests.Request(
269
277
  method=http_request.method,
270
278
  url=http_request.url,
271
279
  headers=http_request.headers,
272
280
  data=data,
273
281
  ).prepare()
274
- response = http_session.send(async_request, stream=stream)
282
+ response = http_session.send(request, stream=stream)
275
283
  errors.APIError.raise_for_response(response)
276
284
  return HttpResponse(
277
285
  response.headers, response if stream else [response.text]
@@ -295,47 +295,3 @@ def _get_required_fields(schema: types.Schema) -> list[str]:
295
295
  if not field_schema.nullable and field_schema.default is None
296
296
  ]
297
297
 
298
-
299
- def function_to_declaration(
300
- client, func: Callable
301
- ) -> types.FunctionDeclaration:
302
- """Converts a function to a FunctionDeclaration."""
303
- parameters_properties = {}
304
- for name, param in inspect.signature(func).parameters.items():
305
- if param.kind in (
306
- inspect.Parameter.POSITIONAL_OR_KEYWORD,
307
- inspect.Parameter.KEYWORD_ONLY,
308
- inspect.Parameter.POSITIONAL_ONLY,
309
- ):
310
- schema = _parse_schema_from_parameter(client, param, func.__name__)
311
- parameters_properties[name] = schema
312
- declaration = types.FunctionDeclaration(
313
- name=func.__name__,
314
- description=func.__doc__,
315
- )
316
- if parameters_properties:
317
- declaration.parameters = types.Schema(
318
- type='OBJECT',
319
- properties=parameters_properties,
320
- )
321
- if client.vertexai:
322
- declaration.parameters.required = _get_required_fields(
323
- declaration.parameters
324
- )
325
- if not client.vertexai:
326
- return declaration
327
-
328
- return_annotation = inspect.signature(func).return_annotation
329
- if return_annotation is inspect._empty:
330
- return declaration
331
-
332
- declaration.response = _parse_schema_from_parameter(
333
- client,
334
- inspect.Parameter(
335
- 'return_value',
336
- inspect.Parameter.POSITIONAL_OR_KEYWORD,
337
- annotation=return_annotation,
338
- ),
339
- func.__name__,
340
- )
341
- return declaration
@@ -293,3 +293,18 @@ def get_max_remote_calls_afc(
293
293
  ):
294
294
  return _DEFAULT_MAX_REMOTE_CALLS_AFC
295
295
  return int(config_model.automatic_function_calling.maximum_remote_calls)
296
+
297
+ def should_append_afc_history(
298
+ config: Optional[types.GenerateContentConfigOrDict] = None,
299
+ ) -> bool:
300
+ config_model = (
301
+ types.GenerateContentConfig(**config)
302
+ if config and isinstance(config, dict)
303
+ else config
304
+ )
305
+ if (
306
+ not config_model
307
+ or not config_model.automatic_function_calling
308
+ ):
309
+ return True
310
+ return not config_model.automatic_function_calling.ignore_call_history
@@ -27,7 +27,6 @@ import PIL.Image
27
27
 
28
28
  from . import _api_client
29
29
  from . import types
30
- from ._automatic_function_calling_util import function_to_declaration
31
30
 
32
31
 
33
32
  def _resource_name(
@@ -307,7 +306,9 @@ def t_tool(client: _api_client.ApiClient, origin) -> types.Tool:
307
306
  return None
308
307
  if inspect.isfunction(origin):
309
308
  return types.Tool(
310
- function_declarations=[function_to_declaration(client, origin)]
309
+ function_declarations=[
310
+ types.FunctionDeclaration.from_function(client, origin)
311
+ ]
311
312
  )
312
313
  else:
313
314
  return origin
google/genai/batches.py CHANGED
@@ -213,6 +213,30 @@ def _CreateBatchJobParameters_to_vertex(
213
213
  return to_object
214
214
 
215
215
 
216
+ def _GetBatchJobConfig_to_mldev(
217
+ api_client: ApiClient,
218
+ from_object: Union[dict, object],
219
+ parent_object: dict = None,
220
+ ) -> dict:
221
+ to_object = {}
222
+ if getv(from_object, ['http_options']) is not None:
223
+ setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
224
+
225
+ return to_object
226
+
227
+
228
+ def _GetBatchJobConfig_to_vertex(
229
+ api_client: ApiClient,
230
+ from_object: Union[dict, object],
231
+ parent_object: dict = None,
232
+ ) -> dict:
233
+ to_object = {}
234
+ if getv(from_object, ['http_options']) is not None:
235
+ setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
236
+
237
+ return to_object
238
+
239
+
216
240
  def _GetBatchJobParameters_to_mldev(
217
241
  api_client: ApiClient,
218
242
  from_object: Union[dict, object],
@@ -222,6 +246,15 @@ def _GetBatchJobParameters_to_mldev(
222
246
  if getv(from_object, ['name']):
223
247
  raise ValueError('name parameter is not supported in Google AI.')
224
248
 
249
+ if getv(from_object, ['config']) is not None:
250
+ setv(
251
+ to_object,
252
+ ['config'],
253
+ _GetBatchJobConfig_to_mldev(
254
+ api_client, getv(from_object, ['config']), to_object
255
+ ),
256
+ )
257
+
225
258
  return to_object
226
259
 
227
260
 
@@ -238,6 +271,39 @@ def _GetBatchJobParameters_to_vertex(
238
271
  t.t_batch_job_name(api_client, getv(from_object, ['name'])),
239
272
  )
240
273
 
274
+ if getv(from_object, ['config']) is not None:
275
+ setv(
276
+ to_object,
277
+ ['config'],
278
+ _GetBatchJobConfig_to_vertex(
279
+ api_client, getv(from_object, ['config']), to_object
280
+ ),
281
+ )
282
+
283
+ return to_object
284
+
285
+
286
+ def _CancelBatchJobConfig_to_mldev(
287
+ api_client: ApiClient,
288
+ from_object: Union[dict, object],
289
+ parent_object: dict = None,
290
+ ) -> dict:
291
+ to_object = {}
292
+ if getv(from_object, ['http_options']) is not None:
293
+ setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
294
+
295
+ return to_object
296
+
297
+
298
+ def _CancelBatchJobConfig_to_vertex(
299
+ api_client: ApiClient,
300
+ from_object: Union[dict, object],
301
+ parent_object: dict = None,
302
+ ) -> dict:
303
+ to_object = {}
304
+ if getv(from_object, ['http_options']) is not None:
305
+ setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
306
+
241
307
  return to_object
242
308
 
243
309
 
@@ -250,6 +316,15 @@ def _CancelBatchJobParameters_to_mldev(
250
316
  if getv(from_object, ['name']):
251
317
  raise ValueError('name parameter is not supported in Google AI.')
252
318
 
319
+ if getv(from_object, ['config']) is not None:
320
+ setv(
321
+ to_object,
322
+ ['config'],
323
+ _CancelBatchJobConfig_to_mldev(
324
+ api_client, getv(from_object, ['config']), to_object
325
+ ),
326
+ )
327
+
253
328
  return to_object
254
329
 
255
330
 
@@ -266,6 +341,15 @@ def _CancelBatchJobParameters_to_vertex(
266
341
  t.t_batch_job_name(api_client, getv(from_object, ['name'])),
267
342
  )
268
343
 
344
+ if getv(from_object, ['config']) is not None:
345
+ setv(
346
+ to_object,
347
+ ['config'],
348
+ _CancelBatchJobConfig_to_vertex(
349
+ api_client, getv(from_object, ['config']), to_object
350
+ ),
351
+ )
352
+
269
353
  return to_object
270
354
 
271
355
 
@@ -660,9 +744,30 @@ class Batches(_common.BaseModule):
660
744
  self.api_client._verify_response(return_value)
661
745
  return return_value
662
746
 
663
- def get(self, *, name: str) -> types.BatchJob:
747
+ def get(
748
+ self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None
749
+ ) -> types.BatchJob:
750
+ """Gets a batch job.
751
+
752
+ Args:
753
+ name (str): A fully-qualified BatchJob resource name or ID.
754
+ Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
755
+ when project and location are initialized in the client.
756
+
757
+ Returns:
758
+ A BatchJob object that contains details about the batch job.
759
+
760
+ Usage:
761
+
762
+ .. code-block:: python
763
+
764
+ batch_job = client.batches.get(name='123456789')
765
+ print(f"Batch job: {batch_job.name}, state {batch_job.state}")
766
+ """
767
+
664
768
  parameter_model = types._GetBatchJobParameters(
665
769
  name=name,
770
+ config=config,
666
771
  )
667
772
 
668
773
  if not self.api_client.vertexai:
@@ -695,9 +800,15 @@ class Batches(_common.BaseModule):
695
800
  self.api_client._verify_response(return_value)
696
801
  return return_value
697
802
 
698
- def cancel(self, *, name: str) -> None:
803
+ def cancel(
804
+ self,
805
+ *,
806
+ name: str,
807
+ config: Optional[types.CancelBatchJobConfigOrDict] = None,
808
+ ) -> None:
699
809
  parameter_model = types._CancelBatchJobParameters(
700
810
  name=name,
811
+ config=config,
701
812
  )
702
813
 
703
814
  if not self.api_client.vertexai:
@@ -767,6 +878,23 @@ class Batches(_common.BaseModule):
767
878
  return return_value
768
879
 
769
880
  def delete(self, *, name: str) -> types.DeleteResourceJob:
881
+ """Deletes a batch job.
882
+
883
+ Args:
884
+ name (str): A fully-qualified BatchJob resource name or ID.
885
+ Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
886
+ when project and location are initialized in the client.
887
+
888
+ Returns:
889
+ A DeleteResourceJob object that shows the status of the deletion.
890
+
891
+ Usage:
892
+
893
+ .. code-block:: python
894
+
895
+ client.batches.delete(name='123456789')
896
+ """
897
+
770
898
  parameter_model = types._DeleteBatchJobParameters(
771
899
  name=name,
772
900
  )
@@ -814,12 +942,51 @@ class Batches(_common.BaseModule):
814
942
  src: str,
815
943
  config: Optional[types.CreateBatchJobConfigOrDict] = None,
816
944
  ) -> types.BatchJob:
945
+ """Creates a batch job.
946
+
947
+ Args:
948
+ model (str): The model to use for the batch job.
949
+ src (str): The source of the batch job. Currently supports GCS URI(-s) or
950
+ Bigquery URI. Example: "gs://path/to/input/data" or
951
+ "bq://projectId.bqDatasetId.bqTableId".
952
+ config (CreateBatchJobConfig): Optional configuration for the batch job.
953
+
954
+ Returns:
955
+ A BatchJob object that contains details about the batch job.
956
+
957
+ Usage:
958
+
959
+ .. code-block:: python
960
+
961
+ batch_job = client.batches.create(
962
+ model="gemini-1.5-flash",
963
+ src="gs://path/to/input/data",
964
+ )
965
+ print(batch_job.state)
966
+ """
817
967
  config = _extra_utils.format_destination(src, config)
818
968
  return self._create(model=model, src=src, config=config)
819
969
 
820
970
  def list(
821
971
  self, *, config: Optional[types.ListBatchJobConfigOrDict] = None
822
972
  ) -> Pager[types.BatchJob]:
973
+ """Lists batch jobs.
974
+
975
+ Args:
976
+ config (ListBatchJobConfig): Optional configuration for the list request.
977
+
978
+ Returns:
979
+ A Pager object that contains one page of batch jobs. When iterating over
980
+ the pager, it automatically fetches the next page if there are more.
981
+
982
+ Usage:
983
+
984
+ .. code-block:: python
985
+
986
+ batch_jobs = client.batches.list(config={"page_size": 10})
987
+ for batch_job in batch_jobs:
988
+ print(f"Batch job: {batch_job.name}, state {batch_job.state}")
989
+ """
823
990
  return Pager(
824
991
  'batch_jobs',
825
992
  self._list,
@@ -873,9 +1040,30 @@ class AsyncBatches(_common.BaseModule):
873
1040
  self.api_client._verify_response(return_value)
874
1041
  return return_value
875
1042
 
876
- async def get(self, *, name: str) -> types.BatchJob:
1043
+ async def get(
1044
+ self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None
1045
+ ) -> types.BatchJob:
1046
+ """Gets a batch job.
1047
+
1048
+ Args:
1049
+ name (str): A fully-qualified BatchJob resource name or ID.
1050
+ Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
1051
+ when project and location are initialized in the client.
1052
+
1053
+ Returns:
1054
+ A BatchJob object that contains details about the batch job.
1055
+
1056
+ Usage:
1057
+
1058
+ .. code-block:: python
1059
+
1060
+ batch_job = client.batches.get(name='123456789')
1061
+ print(f"Batch job: {batch_job.name}, state {batch_job.state}")
1062
+ """
1063
+
877
1064
  parameter_model = types._GetBatchJobParameters(
878
1065
  name=name,
1066
+ config=config,
879
1067
  )
880
1068
 
881
1069
  if not self.api_client.vertexai:
@@ -908,9 +1096,15 @@ class AsyncBatches(_common.BaseModule):
908
1096
  self.api_client._verify_response(return_value)
909
1097
  return return_value
910
1098
 
911
- async def cancel(self, *, name: str) -> None:
1099
+ async def cancel(
1100
+ self,
1101
+ *,
1102
+ name: str,
1103
+ config: Optional[types.CancelBatchJobConfigOrDict] = None,
1104
+ ) -> None:
912
1105
  parameter_model = types._CancelBatchJobParameters(
913
1106
  name=name,
1107
+ config=config,
914
1108
  )
915
1109
 
916
1110
  if not self.api_client.vertexai:
@@ -980,6 +1174,23 @@ class AsyncBatches(_common.BaseModule):
980
1174
  return return_value
981
1175
 
982
1176
  async def delete(self, *, name: str) -> types.DeleteResourceJob:
1177
+ """Deletes a batch job.
1178
+
1179
+ Args:
1180
+ name (str): A fully-qualified BatchJob resource name or ID.
1181
+ Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
1182
+ when project and location are initialized in the client.
1183
+
1184
+ Returns:
1185
+ A DeleteResourceJob object that shows the status of the deletion.
1186
+
1187
+ Usage:
1188
+
1189
+ .. code-block:: python
1190
+
1191
+ client.batches.delete(name='123456789')
1192
+ """
1193
+
983
1194
  parameter_model = types._DeleteBatchJobParameters(
984
1195
  name=name,
985
1196
  )
@@ -1027,12 +1238,51 @@ class AsyncBatches(_common.BaseModule):
1027
1238
  src: str,
1028
1239
  config: Optional[types.CreateBatchJobConfigOrDict] = None,
1029
1240
  ) -> types.BatchJob:
1241
+ """Creates a batch job asynchronously.
1242
+
1243
+ Args:
1244
+ model (str): The model to use for the batch job.
1245
+ src (str): The source of the batch job. Currently supports GCS URI(-s) or
1246
+ Bigquery URI. Example: "gs://path/to/input/data" or
1247
+ "bq://projectId.bqDatasetId.bqTableId".
1248
+ config (CreateBatchJobConfig): Optional configuration for the batch job.
1249
+
1250
+ Returns:
1251
+ A BatchJob object that contains details about the batch job.
1252
+
1253
+ Usage:
1254
+
1255
+ .. code-block:: python
1256
+
1257
+ batch_job = await client.aio.batches.create(
1258
+ model="gemini-1.5-flash",
1259
+ src="gs://path/to/input/data",
1260
+ )
1261
+ """
1030
1262
  config = _extra_utils.format_destination(src, config)
1031
1263
  return await self._create(model=model, src=src, config=config)
1032
1264
 
1033
1265
  async def list(
1034
1266
  self, *, config: Optional[types.ListBatchJobConfigOrDict] = None
1035
1267
  ) -> AsyncPager[types.BatchJob]:
1268
+ """Lists batch jobs asynchronously.
1269
+
1270
+ Args:
1271
+ config (ListBatchJobConfig): Optional configuration for the list request.
1272
+
1273
+ Returns:
1274
+ An AsyncPager object that contains one page of batch jobs. When
1275
+ iterating over the pager, it automatically fetches the next page if there are more.
1276
+
1277
+ Usage:
1278
+
1279
+ .. code-block:: python
1280
+
1281
+ batch_jobs = await client.aio.batches.list(config={'page_size': 5})
1282
+ print(f"current page: {batch_jobs.page}")
1283
+ await batch_jobs.next_page()
1284
+ print(f"next page: {batch_jobs.page}")
1285
+ """
1036
1286
  return AsyncPager(
1037
1287
  'batch_jobs',
1038
1288
  self._list,
google/genai/caches.py CHANGED
@@ -1249,6 +1249,7 @@ class Caches(_common.BaseModule):
1249
1249
  Usage:
1250
1250
 
1251
1251
  .. code-block:: python
1252
+
1252
1253
  contents = ... // Initialize the content to cache.
1253
1254
  response = await client.aio.caches.create(
1254
1255
  model= ... // The publisher model id
@@ -1310,6 +1311,7 @@ class Caches(_common.BaseModule):
1310
1311
  """Gets cached content configurations.
1311
1312
 
1312
1313
  .. code-block:: python
1314
+
1313
1315
  await client.aio.caches.get(name= ... ) // The server-generated resource
1314
1316
  name.
1315
1317
  """
@@ -1364,6 +1366,7 @@ class Caches(_common.BaseModule):
1364
1366
  Usage:
1365
1367
 
1366
1368
  .. code-block:: python
1369
+
1367
1370
  await client.aio.caches.delete(name= ... ) // The server-generated
1368
1371
  resource name.
1369
1372
  """
@@ -1420,6 +1423,7 @@ class Caches(_common.BaseModule):
1420
1423
  """Updates cached content configurations.
1421
1424
 
1422
1425
  .. code-block:: python
1426
+
1423
1427
  response = await client.aio.caches.update(
1424
1428
  name= ... // The server-generated resource name.
1425
1429
  config={
@@ -1473,6 +1477,7 @@ class Caches(_common.BaseModule):
1473
1477
  """Lists cached content configurations.
1474
1478
 
1475
1479
  .. code-block:: python
1480
+
1476
1481
  cached_contents = await client.aio.caches.list(config={'page_size': 2})
1477
1482
  async for cached_content in cached_contents:
1478
1483
  print(cached_content)
@@ -1548,6 +1553,7 @@ class AsyncCaches(_common.BaseModule):
1548
1553
  Usage:
1549
1554
 
1550
1555
  .. code-block:: python
1556
+
1551
1557
  contents = ... // Initialize the content to cache.
1552
1558
  response = await client.aio.caches.create(
1553
1559
  model= ... // The publisher model id
@@ -1609,6 +1615,7 @@ class AsyncCaches(_common.BaseModule):
1609
1615
  """Gets cached content configurations.
1610
1616
 
1611
1617
  .. code-block:: python
1618
+
1612
1619
  await client.aio.caches.get(name= ... ) // The server-generated resource
1613
1620
  name.
1614
1621
  """
@@ -1663,6 +1670,7 @@ class AsyncCaches(_common.BaseModule):
1663
1670
  Usage:
1664
1671
 
1665
1672
  .. code-block:: python
1673
+
1666
1674
  await client.aio.caches.delete(name= ... ) // The server-generated
1667
1675
  resource name.
1668
1676
  """
@@ -1719,6 +1727,7 @@ class AsyncCaches(_common.BaseModule):
1719
1727
  """Updates cached content configurations.
1720
1728
 
1721
1729
  .. code-block:: python
1730
+
1722
1731
  response = await client.aio.caches.update(
1723
1732
  name= ... // The server-generated resource name.
1724
1733
  config={
@@ -1772,6 +1781,7 @@ class AsyncCaches(_common.BaseModule):
1772
1781
  """Lists cached content configurations.
1773
1782
 
1774
1783
  .. code-block:: python
1784
+
1775
1785
  cached_contents = await client.aio.caches.list(config={'page_size': 2})
1776
1786
  async for cached_content in cached_contents:
1777
1787
  print(cached_content)
google/genai/chats.py CHANGED
@@ -54,6 +54,7 @@ class Chat(_BaseChat):
54
54
  Usage:
55
55
 
56
56
  .. code-block:: python
57
+
57
58
  chat = client.chats.create(model='gemini-1.5-flash')
58
59
  response = chat.send_message('tell me a story')
59
60
  """
@@ -65,7 +66,12 @@ class Chat(_BaseChat):
65
66
  config=self._config,
66
67
  )
67
68
  if response.candidates and response.candidates[0].content:
68
- self._curated_history.append(input_content)
69
+ if response.automatic_function_calling_history:
70
+ self._curated_history.extend(
71
+ response.automatic_function_calling_history
72
+ )
73
+ else:
74
+ self._curated_history.append(input_content)
69
75
  self._curated_history.append(response.candidates[0].content)
70
76
  return response
71
77
 
@@ -127,6 +133,7 @@ class AsyncChat(_BaseChat):
127
133
  Usage:
128
134
 
129
135
  .. code-block:: python
136
+
130
137
  chat = client.chats.create(model='gemini-1.5-flash')
131
138
  response = chat.send_message('tell me a story')
132
139
  """
@@ -138,7 +145,12 @@ class AsyncChat(_BaseChat):
138
145
  config=self._config,
139
146
  )
140
147
  if response.candidates and response.candidates[0].content:
141
- self._curated_history.append(input_content)
148
+ if response.automatic_function_calling_history:
149
+ self._curated_history.extend(
150
+ response.automatic_function_calling_history
151
+ )
152
+ else:
153
+ self._curated_history.append(input_content)
142
154
  self._curated_history.append(response.candidates[0].content)
143
155
  return response
144
156
 
google/genai/files.py CHANGED
@@ -605,6 +605,7 @@ class Files(_common.BaseModule):
605
605
  Usage:
606
606
 
607
607
  .. code-block:: python
608
+
608
609
  pager = client.files.list(config={'page_size': 10})
609
610
  for file in pager.page:
610
611
  print(file.name)
@@ -712,6 +713,7 @@ class Files(_common.BaseModule):
712
713
  Usage:
713
714
 
714
715
  .. code-block:: python
716
+
715
717
  file = client.files.get(name='files/...')
716
718
  print(file.uri)
717
719
  """
@@ -766,6 +768,7 @@ class Files(_common.BaseModule):
766
768
  Usage:
767
769
 
768
770
  .. code-block:: python
771
+
769
772
  client.files.delete(name='files/...')
770
773
  """
771
774
 
@@ -916,6 +919,7 @@ class AsyncFiles(_common.BaseModule):
916
919
  Usage:
917
920
 
918
921
  .. code-block:: python
922
+
919
923
  pager = client.files.list(config={'page_size': 10})
920
924
  for file in pager.page:
921
925
  print(file.name)
@@ -1023,6 +1027,7 @@ class AsyncFiles(_common.BaseModule):
1023
1027
  Usage:
1024
1028
 
1025
1029
  .. code-block:: python
1030
+
1026
1031
  file = client.files.get(name='files/...')
1027
1032
  print(file.uri)
1028
1033
  """
@@ -1077,6 +1082,7 @@ class AsyncFiles(_common.BaseModule):
1077
1082
  Usage:
1078
1083
 
1079
1084
  .. code-block:: python
1085
+
1080
1086
  client.files.delete(name='files/...')
1081
1087
  """
1082
1088