google-genai 1.7.0__py3-none-any.whl → 1.53.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. google/genai/__init__.py +4 -2
  2. google/genai/_adapters.py +55 -0
  3. google/genai/_api_client.py +1301 -299
  4. google/genai/_api_module.py +1 -1
  5. google/genai/_automatic_function_calling_util.py +54 -33
  6. google/genai/_base_transformers.py +26 -0
  7. google/genai/_base_url.py +50 -0
  8. google/genai/_common.py +560 -59
  9. google/genai/_extra_utils.py +371 -38
  10. google/genai/_live_converters.py +1467 -0
  11. google/genai/_local_tokenizer_loader.py +214 -0
  12. google/genai/_mcp_utils.py +117 -0
  13. google/genai/_operations_converters.py +394 -0
  14. google/genai/_replay_api_client.py +204 -92
  15. google/genai/_test_api_client.py +1 -1
  16. google/genai/_tokens_converters.py +520 -0
  17. google/genai/_transformers.py +633 -233
  18. google/genai/batches.py +1733 -538
  19. google/genai/caches.py +678 -1012
  20. google/genai/chats.py +48 -38
  21. google/genai/client.py +142 -15
  22. google/genai/documents.py +532 -0
  23. google/genai/errors.py +141 -35
  24. google/genai/file_search_stores.py +1296 -0
  25. google/genai/files.py +312 -744
  26. google/genai/live.py +617 -367
  27. google/genai/live_music.py +197 -0
  28. google/genai/local_tokenizer.py +395 -0
  29. google/genai/models.py +3598 -3116
  30. google/genai/operations.py +201 -362
  31. google/genai/pagers.py +23 -7
  32. google/genai/py.typed +1 -0
  33. google/genai/tokens.py +362 -0
  34. google/genai/tunings.py +1274 -496
  35. google/genai/types.py +14535 -5454
  36. google/genai/version.py +2 -2
  37. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/METADATA +736 -234
  38. google_genai-1.53.0.dist-info/RECORD +41 -0
  39. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/WHEEL +1 -1
  40. google_genai-1.7.0.dist-info/RECORD +0 -27
  41. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info/licenses}/LICENSE +0 -0
  42. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/top_level.txt +0 -0
google/genai/chats.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2024 Google LLC
+ # Copyright 2025 Google LLC
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -13,13 +13,14 @@
  # limitations under the License.
  #

+ from collections.abc import Iterator
  import sys
  from typing import AsyncIterator, Awaitable, Optional, Union, get_args

  from . import _transformers as t
  from . import types
  from .models import AsyncModels, Models
- from .types import Content, GenerateContentConfigOrDict, GenerateContentResponse, Part, PartUnionDict
+ from .types import Content, ContentOrDict, GenerateContentConfigOrDict, GenerateContentResponse, Part, PartUnionDict


  if sys.version_info >= (3, 10):
@@ -62,13 +63,8 @@ def _extract_curated_history(
    """Extracts the curated (valid) history from a comprehensive history.

    The comprehensive history contains all turns (user input and model responses),
-   including any invalid or rejected model outputs. This function filters
-   that history to return only the valid turns.
-
-   A "turn" starts with one user input (a single content) and then follows by
-   corresponding model response (which may consist of multiple contents).
-   Turns are assumed to alternate: user input, model output, user input, model
-   output, etc.
+   including any invalid or rejected model outputs. This function filters that
+   history to return only the valid turns.

    Args:
      comprehensive_history: A list representing the complete chat history.
@@ -83,8 +79,6 @@ def _extract_curated_history(
    length = len(comprehensive_history)
    i = 0
    current_input = comprehensive_history[i]
-   if current_input.role != "user":
-     raise ValueError("History must start with a user turn.")
    while i < length:
      if comprehensive_history[i].role not in ["user", "model"]:
        raise ValueError(
@@ -93,6 +87,7 @@ def _extract_curated_history(
      if comprehensive_history[i].role == "user":
        current_input = comprehensive_history[i]
+       curated_history.append(current_input)
        i += 1
      else:
        current_output = []
@@ -103,8 +98,9 @@ def _extract_curated_history(
            is_valid = False
          i += 1
        if is_valid:
-         curated_history.append(current_input)
          curated_history.extend(current_output)
+       elif curated_history:
+         curated_history.pop()
    return curated_history
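Note on the `_extract_curated_history` hunks above: curation is now eager. Each user turn is appended to the curated history as soon as it is seen, and popped back off if the model output that follows turns out to be invalid; the old requirement that history start with a user turn is gone. A minimal sketch of the resulting behavior, assuming (as the library's private `_validate_content` helper appears to do) that a model turn with no parts counts as invalid:

```python
from google.genai.chats import _extract_curated_history
from google.genai.types import Content, Part

history = [
    Content(role='user', parts=[Part(text='What is 2 + 2?')]),
    Content(role='model', parts=[]),  # assumed invalid: no parts
    Content(role='user', parts=[Part(text='Never mind.')]),
    Content(role='model', parts=[Part(text='Okay!')]),
]

curated = _extract_curated_history(history)
# The invalid model turn and the user input that produced it are both
# dropped, leaving only the second, valid user/model pair.
assert [c.role for c in curated] == ['user', 'model']
```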
@@ -116,14 +112,21 @@ class _BaseChat:
        *,
        model: str,
        config: Optional[GenerateContentConfigOrDict] = None,
-       history: list[Content],
+       history: list[ContentOrDict],
    ):
      self._model = model
      self._config = config
-     self._comprehensive_history = history
+     content_models = []
+     for content in history:
+       if not isinstance(content, Content):
+         content_model = Content.model_validate(content)
+       else:
+         content_model = content
+       content_models.append(content_model)
+     self._comprehensive_history = content_models
      """Comprehensive history is the full history of the chat, including turns of the invalid contents from the model and their associated inputs.
      """
-     self._curated_history = _extract_curated_history(history)
+     self._curated_history = _extract_curated_history(content_models)
      """Curated history is the set of valid turns that will be used in the subsequent send requests.
      """
@@ -133,7 +136,7 @@ class _BaseChat:
        model_output: list[Content],
        automatic_function_calling_history: list[Content],
        is_valid: bool,
-   ):
+   ) -> None:
      """Records the chat history.

      Maintaining both comprehensive and curated histories.
@@ -152,7 +155,7 @@ class _BaseChat:
          # Because the AFC input contains the entire curated chat history in
          # addition to the new user input, we need to truncate the AFC history
          # to deduplicate the existing chat history.
-         automatic_function_calling_history[len(self._curated_history):]
+         automatic_function_calling_history[len(self._curated_history) :]
          if automatic_function_calling_history
          else [user_input]
      )
@@ -210,7 +213,7 @@ class Chat(_BaseChat):
        modules: Models,
        model: str,
        config: Optional[GenerateContentConfigOrDict] = None,
-       history: list[Content],
+       history: list[ContentOrDict],
    ):
      self._modules = modules
      super().__init__(
@@ -238,7 +241,7 @@ class Chat(_BaseChat):
      .. code-block:: python

-       chat = client.chats.create(model='gemini-1.5-flash')
+       chat = client.chats.create(model='gemini-2.0-flash')
        response = chat.send_message('tell me a story')
      """
@@ -247,7 +250,7 @@ class Chat(_BaseChat):
            f"Message must be a valid part type: {types.PartUnion} or"
            f" {types.PartUnionDict}, got {type(message)}"
        )
-     input_content = t.t_content(self._modules._api_client, message)
+     input_content = t.t_content(message)
      response = self._modules.generate_content(
          model=self._model,
          contents=self._curated_history + [input_content],  # type: ignore[arg-type]
@@ -275,7 +278,7 @@ class Chat(_BaseChat):
        self,
        message: Union[list[PartUnionDict], PartUnionDict],
        config: Optional[GenerateContentConfigOrDict] = None,
-   ):
+   ) -> Iterator[GenerateContentResponse]:
      """Sends the conversation history with the additional message and yields the model's response in chunks.

      Args:
@@ -290,7 +293,7 @@ class Chat(_BaseChat):
      .. code-block:: python

-       chat = client.chats.create(model='gemini-1.5-flash')
+       chat = client.chats.create(model='gemini-2.0-flash')
        for chunk in chat.send_message_stream('tell me a story'):
          print(chunk.text)
      """
@@ -300,7 +303,7 @@ class Chat(_BaseChat):
            f"Message must be a valid part type: {types.PartUnion} or"
            f" {types.PartUnionDict}, got {type(message)}"
        )
-     input_content = t.t_content(self._modules._api_client, message)
+     input_content = t.t_content(message)
      output_contents = []
      finish_reason = None
      is_valid = True
@@ -320,7 +323,7 @@ class Chat(_BaseChat):
        yield chunk
      automatic_function_calling_history = (
          chunk.automatic_function_calling_history
-         if chunk.automatic_function_calling_history
+         if chunk is not None and chunk.automatic_function_calling_history
          else []
      )
      self.record_history(
@@ -344,7 +347,7 @@ class Chats:
        *,
        model: str,
        config: Optional[GenerateContentConfigOrDict] = None,
-       history: Optional[list[Content]] = None,
+       history: Optional[list[ContentOrDict]] = None,
    ) -> Chat:
      """Creates a new chat session.

@@ -373,7 +376,7 @@ class AsyncChat(_BaseChat):
        modules: AsyncModels,
        model: str,
        config: Optional[GenerateContentConfigOrDict] = None,
-       history: list[Content],
+       history: list[ContentOrDict],
    ):
      self._modules = modules
      super().__init__(
@@ -401,7 +404,7 @@ class AsyncChat(_BaseChat):
      .. code-block:: python

-       chat = client.aio.chats.create(model='gemini-1.5-flash')
+       chat = client.aio.chats.create(model='gemini-2.0-flash')
        response = await chat.send_message('tell me a story')
      """
      if not _is_part_type(message):
@@ -409,7 +412,7 @@ class AsyncChat(_BaseChat):
            f"Message must be a valid part type: {types.PartUnion} or"
            f" {types.PartUnionDict}, got {type(message)}"
        )
-     input_content = t.t_content(self._modules._api_client, message)
+     input_content = t.t_content(message)
      response = await self._modules.generate_content(
          model=self._model,
          contents=self._curated_history + [input_content],  # type: ignore[arg-type]
@@ -437,7 +440,7 @@ class AsyncChat(_BaseChat):
        self,
        message: Union[list[PartUnionDict], PartUnionDict],
        config: Optional[GenerateContentConfigOrDict] = None,
-   ) -> Awaitable[AsyncIterator[GenerateContentResponse]]:
+   ) -> AsyncIterator[GenerateContentResponse]:
      """Sends the conversation history with the additional message and yields the model's response in chunks.

      Args:
@@ -451,7 +454,8 @@ class AsyncChat(_BaseChat):
      Usage:

      .. code-block:: python
-       chat = client.aio.chats.create(model='gemini-1.5-flash')
+
+       chat = client.aio.chats.create(model='gemini-2.0-flash')
        async for chunk in await chat.send_message_stream('tell me a story'):
          print(chunk.text)
      """
@@ -461,16 +465,16 @@ class AsyncChat(_BaseChat):
            f"Message must be a valid part type: {types.PartUnion} or"
            f" {types.PartUnionDict}, got {type(message)}"
        )
-     input_content = t.t_content(self._modules._api_client, message)
+     input_content = t.t_content(message)

-     async def async_generator():
+     async def async_generator():  # type: ignore[no-untyped-def]
        output_contents = []
        finish_reason = None
        is_valid = True
        chunk = None
-       async for chunk in await self._modules.generate_content_stream(
+       async for chunk in await self._modules.generate_content_stream(  # type: ignore[attr-defined]
            model=self._model,
-           contents=self._curated_history + [input_content],
+           contents=self._curated_history + [input_content],  # type: ignore[arg-type]
            config=config if config else self._config,
        ):
          if not _validate_response(chunk):
@@ -481,13 +485,19 @@ class AsyncChat(_BaseChat):
            finish_reason = chunk.candidates[0].finish_reason
          yield chunk

+       if not output_contents or finish_reason is None:
+         is_valid = False
+
        self.record_history(
            user_input=input_content,
            model_output=output_contents,
-           automatic_function_calling_history=chunk.automatic_function_calling_history,
-           is_valid=is_valid and output_contents and finish_reason,
+           automatic_function_calling_history=chunk.automatic_function_calling_history
+           if chunk is not None and chunk.automatic_function_calling_history
+           else [],
+           is_valid=is_valid,
        )
-     return async_generator()
+
+     return async_generator()  # type: ignore[no-untyped-call, no-any-return]


  class AsyncChats:
@@ -501,7 +511,7 @@ class AsyncChats:
        *,
        model: str,
        config: Optional[GenerateContentConfigOrDict] = None,
-       history: Optional[list[Content]] = None,
+       history: Optional[list[ContentOrDict]] = None,
    ) -> AsyncChat:
      """Creates a new chat session.

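Taken together, the chats.py changes loosen `history` from `list[Content]` to `list[ContentOrDict]` (plain dicts are validated into `Content` models in `_BaseChat.__init__`), drop the API-client argument from `t.t_content`, and give the streaming methods concrete return annotations. A short sketch of the loosened construction path; the model name and messages are illustrative, and an API key is assumed to be available via the `GOOGLE_API_KEY` environment variable:

```python
from google.genai import Client
from google.genai.types import Content, Part

client = Client()  # assumes GOOGLE_API_KEY is set in the environment

# Content instances and plain dicts can now be mixed in history; dicts are
# run through Content.model_validate by _BaseChat.__init__.
chat = client.chats.create(
    model='gemini-2.0-flash',
    history=[
        {'role': 'user', 'parts': [{'text': 'Hi there.'}]},
        Content(role='model', parts=[Part(text='Hello! How can I help?')]),
    ],
)
response = chat.send_message('tell me a story')
print(response.text)
```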
google/genai/client.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2024 Google LLC
+ # Copyright 2025 Google LLC
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -13,22 +13,28 @@
  # limitations under the License.
  #

+ import asyncio
  import os
+ from types import TracebackType
  from typing import Optional, Union

  import google.auth
  import pydantic

- from ._api_client import BaseApiClient, HttpOptions, HttpOptionsDict
+ from ._api_client import BaseApiClient
+ from ._base_url import get_base_url
  from ._replay_api_client import ReplayApiClient
  from .batches import AsyncBatches, Batches
  from .caches import AsyncCaches, Caches
  from .chats import AsyncChats, Chats
+ from .file_search_stores import AsyncFileSearchStores, FileSearchStores
  from .files import AsyncFiles, Files
  from .live import AsyncLive
  from .models import AsyncModels, Models
  from .operations import AsyncOperations, Operations
+ from .tokens import AsyncTokens, Tokens
  from .tunings import AsyncTunings, Tunings
+ from .types import HttpOptions, HttpOptionsDict, HttpRetryOptions


  class AsyncClient:
@@ -42,7 +48,9 @@ class AsyncClient:
      self._caches = AsyncCaches(self._api_client)
      self._batches = AsyncBatches(self._api_client)
      self._files = AsyncFiles(self._api_client)
+     self._file_search_stores = AsyncFileSearchStores(self._api_client)
      self._live = AsyncLive(self._api_client)
+     self._tokens = AsyncTokens(self._api_client)
      self._operations = AsyncOperations(self._api_client)

    @property
@@ -57,6 +65,10 @@ class AsyncClient:
    def caches(self) -> AsyncCaches:
      return self._caches

+   @property
+   def file_search_stores(self) -> AsyncFileSearchStores:
+     return self._file_search_stores
+
    @property
    def batches(self) -> AsyncBatches:
      return self._batches
@@ -73,10 +85,59 @@ class AsyncClient:
    def live(self) -> AsyncLive:
      return self._live

+   @property
+   def auth_tokens(self) -> AsyncTokens:
+     return self._tokens
+
    @property
    def operations(self) -> AsyncOperations:
      return self._operations

+   async def aclose(self) -> None:
+     """Closes the async client explicitly.
+
+     However, it doesn't close the sync client, which can be closed using the
+     Client.close() method or using the context manager.
+
+     Usage:
+     .. code-block:: python
+
+       from google.genai import Client
+
+       async_client = Client(
+           vertexai=True, project='my-project-id', location='us-central1'
+       ).aio
+       response_1 = await async_client.models.generate_content(
+           model='gemini-2.0-flash',
+           contents='Hello World',
+       )
+       response_2 = await async_client.models.generate_content(
+           model='gemini-2.0-flash',
+           contents='Hello World',
+       )
+       # Close the client to release resources.
+       await async_client.aclose()
+     """
+     await self._api_client.aclose()
+
+   async def __aenter__(self) -> 'AsyncClient':
+     return self
+
+   async def __aexit__(
+       self,
+       exc_type: Optional[Exception],
+       exc_value: Optional[Exception],
+       traceback: Optional[TracebackType],
+   ) -> None:
+     await self.aclose()
+
+   def __del__(self) -> None:
+     try:
+       asyncio.get_running_loop().create_task(self.aclose())
+     except Exception:
+       pass
+
+
  class DebugConfig(pydantic.BaseModel):
    """Configuration options that change client network behavior when testing."""

@@ -113,26 +174,29 @@ class Client:
    Attributes:
      api_key: The `API key <https://ai.google.dev/gemini-api/docs/api-key>`_ to
        use for authentication. Applies to the Gemini Developer API only.
-     vertexai: Indicates whether the client should use the Vertex AI
-       API endpoints. Defaults to False (uses Gemini Developer API endpoints).
+     vertexai: Indicates whether the client should use the Vertex AI API
+       endpoints. Defaults to False (uses Gemini Developer API endpoints).
        Applies to the Vertex AI API only.
      credentials: The credentials to use for authentication when calling the
        Vertex AI APIs. Credentials can be obtained from environment variables and
-       default credentials. For more information, see
-       `Set up Application Default Credentials
+       default credentials. For more information, see `Set up Application Default
+       Credentials
        <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_.
        Applies to the Vertex AI API only.
-     project: The `Google Cloud project ID <https://cloud.google.com/vertex-ai/docs/start/cloud-environment>`_ to
-       use for quota. Can be obtained from environment variables (for example,
+     project: The `Google Cloud project ID
+       <https://cloud.google.com/vertex-ai/docs/start/cloud-environment>`_ to use
+       for quota. Can be obtained from environment variables (for example,
        ``GOOGLE_CLOUD_PROJECT``). Applies to the Vertex AI API only.
-     location: The `location <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations>`_
+       Find your `Google Cloud project ID <https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects>`_.
+     location: The `location
+       <https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations>`_
        to send API requests to (for example, ``us-central1``). Can be obtained
        from environment variables. Applies to the Vertex AI API only.
      debug_config: Config settings that control network behavior of the client.
        This is typically used when running test code.
      http_options: Http options to use for the client. These options will be
-       applied to all requests made by the client. Example usage:
-       `client = genai.Client(http_options=types.HttpOptions(api_version='v1'))`.
+       applied to all requests made by the client. Example usage: `client =
+       genai.Client(http_options=types.HttpOptions(api_version='v1'))`.

    Usage for the Gemini Developer API:
@@ -194,6 +258,15 @@ class Client:
      """

      self._debug_config = debug_config or DebugConfig()
+     if isinstance(http_options, dict):
+       http_options = HttpOptions(**http_options)
+
+     base_url = get_base_url(vertexai or False, http_options)
+     if base_url:
+       if http_options:
+         http_options.base_url = base_url
+       else:
+         http_options = HttpOptions(base_url=base_url)

      self._api_client = self._get_api_client(
          vertexai=vertexai,
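The hunk above means `http_options` may now be passed as a plain dict and is coerced to `HttpOptions` before the `get_base_url` override is applied, so both spellings below are equivalent (a sketch; assumes `GOOGLE_API_KEY` is set in the environment):

```python
from google.genai import Client
from google.genai.types import HttpOptions

# Both forms reach _get_api_client as the same HttpOptions configuration.
client_a = Client(http_options=HttpOptions(api_version='v1'))
client_b = Client(http_options={'api_version': 'v1'})
```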
@@ -209,8 +282,10 @@ class Client:
      self._models = Models(self._api_client)
      self._tunings = Tunings(self._api_client)
      self._caches = Caches(self._api_client)
+     self._file_search_stores = FileSearchStores(self._api_client)
      self._batches = Batches(self._api_client)
      self._files = Files(self._api_client)
+     self._tokens = Tokens(self._api_client)
      self._operations = Operations(self._api_client)

    @staticmethod
@@ -222,17 +297,17 @@ class Client:
        location: Optional[str] = None,
        debug_config: Optional[DebugConfig] = None,
        http_options: Optional[HttpOptions] = None,
-   ):
+   ) -> BaseApiClient:
      if debug_config and debug_config.client_mode in [
          'record',
          'replay',
          'auto',
      ]:
        return ReplayApiClient(
-           mode=debug_config.client_mode,
-           replay_id=debug_config.replay_id,
+           mode=debug_config.client_mode,  # type: ignore[arg-type]
+           replay_id=debug_config.replay_id,  # type: ignore[arg-type]
            replays_directory=debug_config.replays_directory,
-           vertexai=vertexai,
+           vertexai=vertexai,  # type: ignore[arg-type]
            api_key=api_key,
            credentials=credentials,
            project=project,
@@ -269,6 +344,10 @@ class Client:
    def caches(self) -> Caches:
      return self._caches

+   @property
+   def file_search_stores(self) -> FileSearchStores:
+     return self._file_search_stores
+
    @property
    def batches(self) -> Batches:
      return self._batches
@@ -277,6 +356,10 @@ class Client:
    def files(self) -> Files:
      return self._files

+   @property
+   def auth_tokens(self) -> Tokens:
+     return self._tokens
+
    @property
    def operations(self) -> Operations:
      return self._operations
@@ -285,3 +368,47 @@ class Client:
    def vertexai(self) -> bool:
      """Returns whether the client is using the Vertex AI API."""
      return self._api_client.vertexai or False
+
+   def close(self) -> None:
+     """Closes the synchronous client explicitly.
+
+     However, it doesn't close the async client, which can be closed using the
+     Client.aio.aclose() method or using the async context manager.
+
+     Usage:
+     .. code-block:: python
+
+       from google.genai import Client
+
+       client = Client(
+           vertexai=True, project='my-project-id', location='us-central1'
+       )
+       response_1 = client.models.generate_content(
+           model='gemini-2.0-flash',
+           contents='Hello World',
+       )
+       response_2 = client.models.generate_content(
+           model='gemini-2.0-flash',
+           contents='Hello World',
+       )
+       # Close the client to release resources.
+       client.close()
+     """
+     self._api_client.close()
+
+   def __enter__(self) -> 'Client':
+     return self
+
+   def __exit__(
+       self,
+       exc_type: Optional[Exception],
+       exc_value: Optional[Exception],
+       traceback: Optional[TracebackType],
+   ) -> None:
+     self.close()
+
+   def __del__(self) -> None:
+     try:
+       self.close()
+     except Exception:
+       pass
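With `close`/`aclose` and the `__enter__`/`__exit__`/`__aenter__`/`__aexit__` hooks added above, both clients can now be scoped deterministically. A sketch combining the two patterns from the new docstrings; the project ID and location are placeholders:

```python
import asyncio

from google.genai import Client


def sync_example() -> None:
  # Client.__enter__/__exit__ call close() on exit.
  with Client(
      vertexai=True, project='my-project-id', location='us-central1'
  ) as client:
    response = client.models.generate_content(
        model='gemini-2.0-flash',
        contents='Hello World',
    )
    print(response.text)


async def async_example() -> None:
  # AsyncClient.__aenter__/__aexit__ call aclose() on exit.
  async with Client(
      vertexai=True, project='my-project-id', location='us-central1'
  ).aio as async_client:
    response = await async_client.models.generate_content(
        model='gemini-2.0-flash',
        contents='Hello World',
    )
    print(response.text)


sync_example()
asyncio.run(async_example())
```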